xref: /dragonfly/sys/dev/disk/mpt/mpt_raid.c (revision ec21d9fb)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: head/sys/dev/mpt/mpt_raid.c 260058 2013-12-29 20:41:32Z marius $
43  */
44 
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47 
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50 
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_periph.h>
54 #include <bus/cam/cam_sim.h>
55 #include <bus/cam/cam_xpt.h>
56 #include <bus/cam/cam_xpt_sim.h>
57 #include <bus/cam/cam_xpt_periph.h>
58 
59 #include <sys/callout.h>
60 #include <sys/kthread.h>
61 #include <sys/sysctl.h>
62 
63 #include <machine/stdarg.h>
64 
/*
 * Result data for a completed RAID action request.  The reply handler
 * (mpt_raid_reply_frame_handler()) copies the reply's ActionStatus and
 * ActionData into this structure, which lives in the request buffer
 * immediately behind the request message itself.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/*
 * Locate the action result stored directly after the RAID action
 * request message in the request's virtual buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Extract the IOC status code from a request, masking off flag bits. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
79 
80 static mpt_probe_handler_t	mpt_raid_probe;
81 static mpt_attach_handler_t	mpt_raid_attach;
82 static mpt_enable_handler_t	mpt_raid_enable;
83 static mpt_event_handler_t	mpt_raid_event;
84 static mpt_shutdown_handler_t	mpt_raid_shutdown;
85 static mpt_reset_handler_t	mpt_raid_ioc_reset;
86 static mpt_detach_handler_t	mpt_raid_detach;
87 
/*
 * Personality registration: the entry points the MPT core invokes to
 * probe, attach, and manage the integrated RAID feature set.
 */
static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
/* This personality requires the mpt_cam personality. */
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
102 
103 static mpt_reply_handler_t mpt_raid_reply_handler;
104 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
105 					MSG_DEFAULT_REPLY *reply_frame);
106 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
107 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
108 static void mpt_raid_thread(void *arg);
109 static timeout_t mpt_raid_timer;
110 #if 0
111 static void mpt_enable_vol(struct mpt_softc *mpt,
112 			   struct mpt_raid_volume *mpt_vol, int enable);
113 #endif
114 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
115 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
116     struct cam_path *);
117 static void mpt_raid_sysctl_attach(struct mpt_softc *);
118 
119 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
120 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
121 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
122 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
123     const char *fmt, ...) __printflike(3, 4);
124 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
125     const char *fmt, ...) __printflike(3, 4);
126 
127 static int mpt_issue_raid_req(struct mpt_softc *mpt,
128     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
129     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
130     int write, int wait);
131 
132 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
133 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
134 
135 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
136 
137 static const char *
138 mpt_vol_type(struct mpt_raid_volume *vol)
139 {
140 	switch (vol->config_page->VolumeType) {
141 	case MPI_RAID_VOL_TYPE_IS:
142 		return ("RAID-0");
143 	case MPI_RAID_VOL_TYPE_IME:
144 		return ("RAID-1E");
145 	case MPI_RAID_VOL_TYPE_IM:
146 		return ("RAID-1");
147 	default:
148 		return ("Unknown");
149 	}
150 }
151 
152 static const char *
153 mpt_vol_state(struct mpt_raid_volume *vol)
154 {
155 	switch (vol->config_page->VolumeStatus.State) {
156 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
157 		return ("Optimal");
158 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
159 		return ("Degraded");
160 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
161 		return ("Failed");
162 	default:
163 		return ("Unknown");
164 	}
165 }
166 
167 static const char *
168 mpt_disk_state(struct mpt_raid_disk *disk)
169 {
170 	switch (disk->config_page.PhysDiskStatus.State) {
171 	case MPI_PHYSDISK0_STATUS_ONLINE:
172 		return ("Online");
173 	case MPI_PHYSDISK0_STATUS_MISSING:
174 		return ("Missing");
175 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
176 		return ("Incompatible");
177 	case MPI_PHYSDISK0_STATUS_FAILED:
178 		return ("Failed");
179 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
180 		return ("Initializing");
181 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
182 		return ("Offline Requested");
183 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
184 		return ("Failed per Host Request");
185 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
186 		return ("Offline");
187 	default:
188 		return ("Unknown");
189 	}
190 }
191 
/*
 * kprintf() wrapper that prefixes the message with the controller's
 * unit name and the volume's identification (index, bus, target id).
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	__va_list ap;

	/* Volume index is derived from its position in the raid_volumes array. */
	kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
205 
/*
 * kprintf() wrapper that prefixes the message with the controller's
 * unit name and the disk's identity: volume id and member number when
 * the disk belongs to a volume, bus and target id otherwise.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	__va_list ap;

	if (disk->volume != NULL) {
		kprintf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
226 
227 static void
228 mpt_raid_async(void *callback_arg, u_int32_t code,
229 	       struct cam_path *path, void *arg)
230 {
231 	struct mpt_softc *mpt;
232 
233 	mpt = (struct mpt_softc*)callback_arg;
234 	switch (code) {
235 	case AC_FOUND_DEVICE:
236 	{
237 		struct ccb_getdev *cgd;
238 		struct mpt_raid_volume *mpt_vol;
239 
240 		cgd = (struct ccb_getdev *)arg;
241 		if (cgd == NULL) {
242 			break;
243 		}
244 
245 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
246 			 cgd->ccb_h.target_id);
247 
248 		RAID_VOL_FOREACH(mpt, mpt_vol) {
249 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
250 				continue;
251 
252 			if (mpt_vol->config_page->VolumeID
253 			 == cgd->ccb_h.target_id) {
254 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
255 				break;
256 			}
257 		}
258 	}
259 	default:
260 		break;
261 	}
262 }
263 
264 static int
265 mpt_raid_probe(struct mpt_softc *mpt)
266 {
267 
268 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
269 		return (ENODEV);
270 	}
271 	return (0);
272 }
273 
274 static int
275 mpt_raid_attach(struct mpt_softc *mpt)
276 {
277 	struct ccb_setasync *csa;
278 	mpt_handler_t	 handler;
279 	int		 error;
280 
281 	mpt_callout_init(mpt, &mpt->raid_timer);
282 
283 	error = mpt_spawn_raid_thread(mpt);
284 	if (error != 0) {
285 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
286 		goto cleanup;
287 	}
288 	csa = &xpt_alloc_ccb()->csa;
289 
290 	MPT_LOCK(mpt);
291 	handler.reply_handler = mpt_raid_reply_handler;
292 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
293 				     &raid_handler_id);
294 	if (error != 0) {
295 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
296 		goto cleanup;
297 	}
298 
299 	xpt_setup_ccb(&csa->ccb_h, mpt->path, 5);
300 	csa->ccb_h.func_code = XPT_SASYNC_CB;
301 	csa->event_enable = AC_FOUND_DEVICE;
302 	csa->callback = mpt_raid_async;
303 	csa->callback_arg = mpt;
304 	xpt_action((union ccb *)csa);
305 	if (csa->ccb_h.status != CAM_REQ_CMP) {
306 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
307 			"CAM async handler.\n");
308 	}
309 	MPT_UNLOCK(mpt);
310 
311 	xpt_free_ccb(&csa->ccb_h);
312 	mpt_raid_sysctl_attach(mpt);
313 	return (0);
314 cleanup:
315 	MPT_UNLOCK(mpt);
316 	mpt_raid_detach(mpt);
317 	return (error);
318 }
319 
/*
 * Personality enable hook.  The RAID personality needs no additional
 * enable-time work; always succeeds.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
326 
/*
 * Tear down the RAID personality: stop the refresh timer, terminate
 * the monitoring thread, unhook the reply handler, and cancel our CAM
 * async callback registration.
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync *csa;
	mpt_handler_t handler;

	csa = &xpt_alloc_ccb()->csa;
	/* Stop the timer before killing the thread it would wake. */
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* An empty event_enable mask deregisters the async callback. */
	xpt_setup_ccb(&csa->ccb_h, mpt->path, /*priority*/5);
	csa->ccb_h.func_code = XPT_SASYNC_CB;
	csa->event_enable = 0;
	csa->callback = mpt_raid_async;
	csa->callback_arg = mpt;
	xpt_action((union ccb *)csa);
	MPT_UNLOCK(mpt);

	xpt_free_ccb(&csa->ccb_h);
}
351 
/*
 * IOC reset notification hook.  Currently a no-op placeholder.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
358 
/*
 * Description strings indexed by the RAID event ReasonCode; see the
 * raid_event_txt[raid_event->ReasonCode] lookup in mpt_raid_event().
 * Codes at or beyond NUM_ELEMENTS() are reported as unhandled.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
374 
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications.  Locates the volume
 * and/or physical disk the event refers to, invalidates cached state
 * and schedules rescans as appropriate for the reason code, logs the
 * event, and finally wakes the RAID monitoring thread.
 *
 * Returns 0 when the event is not RAID-related (unhandled), 1 when it
 * was consumed here.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Find the active volume matching the event's bus/id, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* PhysDiskNum 0xFF means the event names no physical disk. */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix with whichever identity we managed to resolve. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "%s", "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	mpt_raid_wakeup(mpt);
	return (1);
}
505 
/*
 * Shutdown hook.  If write caching was configured as rebuild-only,
 * switch the policy to off and push that setting to every volume so
 * caches are disabled before the system goes down.
 */
static void
mpt_raid_shutdown(struct mpt_softc *mpt)
{
	struct mpt_raid_volume *mpt_vol;

	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
		return;
	}

	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		mpt_verify_mwce(mpt, mpt_vol);
	}
}
520 
/*
 * Reply handler for RAID action requests.  Parses the reply frame (if
 * present), marks the request complete, removes it from the pending
 * list, and then either wakes a sleeping waiter or frees the request.
 *
 * Always returns TRUE to indicate the reply was consumed.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter owns the request; otherwise we release it here. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
551 
552 /*
553  * Parse additional completion information in the reply
554  * frame for RAID I/O requests.
555  */
556 static int
557 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
558     MSG_DEFAULT_REPLY *reply_frame)
559 {
560 	MSG_RAID_ACTION_REPLY *reply;
561 	struct mpt_raid_action_result *action_result;
562 	MSG_RAID_ACTION_REQUEST *rap;
563 
564 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
565 	req->IOCStatus = le16toh(reply->IOCStatus);
566 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
567 
568 	switch (rap->Action) {
569 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
570 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
571 		break;
572 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
573 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
574 		break;
575 	default:
576 		break;
577 	}
578 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
579 	memcpy(&action_result->action_data, &reply->ActionData,
580 	    sizeof(action_result->action_data));
581 	action_result->action_status = le16toh(reply->ActionStatus);
582 	return (TRUE);
583 }
584 
585 /*
586  * Utiltity routine to perform a RAID action command;
587  */
588 static int
589 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
590 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
591 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
592 		   int write, int wait)
593 {
594 	MSG_RAID_ACTION_REQUEST *rap;
595 	SGE_SIMPLE32 *se;
596 
597 	rap = req->req_vbuf;
598 	memset(rap, 0, sizeof *rap);
599 	rap->Action = Action;
600 	rap->ActionDataWord = htole32(ActionDataWord);
601 	rap->Function = MPI_FUNCTION_RAID_ACTION;
602 	rap->VolumeID = vol->config_page->VolumeID;
603 	rap->VolumeBus = vol->config_page->VolumeBus;
604 	if (disk != NULL)
605 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
606 	else
607 		rap->PhysDiskNum = 0xFF;
608 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
609 	se->Address = htole32(addr);
610 	MPI_pSGE_SET_LENGTH(se, len);
611 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
612 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
613 	    MPI_SGE_FLAGS_END_OF_LIST |
614 	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
615 	se->FlagsLength = htole32(se->FlagsLength);
616 	rap->MsgContext = htole32(req->index | raid_handler_id);
617 
618 	mpt_check_doorbell(mpt);
619 	mpt_send_cmd(mpt, req);
620 
621 	if (wait) {
622 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
623 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
624 	} else {
625 		return (0);
626 	}
627 }
628 
629 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the RAID monitoring kthread.  Returns 0 on success or the
 * kthread_create() errno; on failure the SIM queue freeze taken below
 * is undone.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, "mpt_raid%d", mpt->unit);
	if (error != 0) {
		/* No thread will ever release the freeze; do it now. */
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
654 
/*
 * Ask the RAID thread to exit and wait until it has done so.  The
 * thread clears mpt->raid_thread and wakes &mpt->raid_thread on exit.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
}
670 
/*
 * Completion callback for the bus rescan issued by mpt_raid_thread():
 * releases the path and frees the dynamically allocated rescan CCB.
 */
static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
    xpt_free_path(ccb->ccb_h.path);
    kfree(ccb, M_TEMP);
}
677 
/*
 * RAID monitoring thread.  Sleeps until mpt_raid_wakeup() posts work,
 * then refreshes the RAID configuration data and, when a rescan was
 * requested, kicks off an asynchronous bus scan of the physical disk
 * SIM.  The initial SIM queue freeze taken by mpt_spawn_raid_thread()
 * is released after the first successful refresh.  Runs holding the
 * MPT lock except while allocating the rescan CCB.  Exits when
 * mpt->shutdwn_raid is set (see mpt_terminate_raid_thread()).
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock for the sleeping allocation. */
			MPT_UNLOCK(mpt);

			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				kfree(ccb, M_TEMP);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* CCB is freed by mpt_cam_rescan_callback(). */
				xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				xpt_action(ccb);

				/* scan is now in progress */
			}
		}
	}
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	kthread_exit();
}
743 
#if 0
/*
 * Disabled (never compiled): physical disk quiesce support.  Kept for
 * reference; see the NOTYET block in mpt_raid_reply_handler().
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
803 
804 /* XXX Ignores that there may be multiple busses/IOCs involved. */
805 cam_status
806 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
807 {
808 	struct mpt_raid_disk *mpt_disk;
809 
810 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
811 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
812 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
813 		*tgt = mpt_disk->config_page.PhysDiskID;
814 		return (0);
815 	}
816 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
817 		 ccb->ccb_h.target_id);
818 	return (-1);
819 }
820 
821 /* XXX Ignores that there may be multiple busses/IOCs involved. */
822 int
823 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
824 {
825 	struct mpt_raid_disk *mpt_disk;
826 	int i;
827 
828 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
829 		return (0);
830 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
831 		mpt_disk = &mpt->raid_disks[i];
832 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
833 		    mpt_disk->config_page.PhysDiskID == tgt)
834 			return (1);
835 	}
836 	return (0);
837 
838 }
839 
840 /* XXX Ignores that there may be multiple busses/IOCs involved. */
841 int
842 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
843 {
844 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
845 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
846 
847 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
848 		return (0);
849 	}
850 	ioc_vol = mpt->ioc_page2->RaidVolume;
851 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
852 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
853 		if (ioc_vol->VolumeID == tgt) {
854 			return (1);
855 		}
856 	}
857 	return (0);
858 }
859 
#if 0
/*
 * Disabled (never compiled): enable or disable a RAID volume via a
 * RAID action request.  NOTE(review): the ETIMEDOUT path returns
 * without freeing "req" -- address before re-enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
912 
/*
 * Reconcile a volume's member write cache enable (MWCE) setting with
 * the administrator's policy in mpt->raid_mwce_setting.  When the
 * hardware setting disagrees with policy, issue a
 * CHANGE_VOLUME_SETTINGS RAID action and, on success, update the
 * cached config page to match.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarily when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/* Toggle the WCE bit just long enough to snapshot the new settings. */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		/*
		 * NOTE(review): "req" is not freed here; the timed-out
		 * request may still complete later.  Confirm this is
		 * intentional.
		 */
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Success: commit the new setting to the cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
995 
/*
 * Reconcile a volume's resync rate and resync priority with the
 * configured mpt->raid_resync_rate.  Two independent adjustments:
 * an explicit rate mismatch is fixed with a SET_RESYNC_RATE action,
 * and a priority-bit mismatch (rates >= 128 imply high priority, per
 * the comparison below) is fixed with CHANGE_VOLUME_SETTINGS.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): "req" is not freed on timeout;
			 * it may still complete later.  Confirm intent.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			/* Success: commit the new rate to the cached page. */
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/* Toggle the priority bit only to snapshot the new settings. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): "req" is not freed on timeout;
			 * it may still complete later.  Confirm intent.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Success: commit the new priority to the cached page. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1086 
1087 static void
1088 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1089 		       struct cam_path *path)
1090 {
1091 	struct ccb_relsim *crs;
1092 
1093 	crs = &xpt_alloc_ccb()->crs;
1094 	xpt_setup_ccb(&crs->ccb_h, path, /*priority*/5);
1095 	crs->ccb_h.func_code = XPT_REL_SIMQ;
1096 	crs->ccb_h.flags = CAM_DEV_QFREEZE;
1097 	crs->release_flags = RELSIM_ADJUST_OPENINGS;
1098 	crs->openings = mpt->raid_queue_depth;
1099 	xpt_action((union ccb *)crs);
1100 	if (crs->ccb_h.status != CAM_REQ_CMP) {
1101 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1102 			    "with CAM status %#x\n", crs->ccb_h.status);
1103 	}
1104 	xpt_free_ccb(&crs->ccb_h);
1105 }
1106 
/*
 * Log a human-readable summary of a RAID volume: its settings bits,
 * hot spare pool membership, and the identity and state of each
 * member disk.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk every bit of the 16-bit settings word, naming known bits. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	/* Report which of the eight hot spare pools this volume draws from. */
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	/* Describe each member disk: address, role, and status decode. */
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		/* Mirrors (IM) have primary/secondary; stripes have order. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1214 
1215 static void
1216 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1217 {
1218 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1219 	int rd_bus = cam_sim_bus(mpt->sim);
1220 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1221 	u_int i;
1222 
1223 	disk_pg = &mpt_disk->config_page;
1224 	mpt_disk_prt(mpt, mpt_disk,
1225 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1226 		     device_get_nameunit(mpt->dev), rd_bus,
1227 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1228 		     pt_bus, (int)(mpt_disk - mpt->raid_disks));
1229 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1230 		return;
1231 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1232 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1233 		   ? ":" : "s:");
1234 	for (i = 0; i < 8; i++) {
1235 		u_int mask;
1236 
1237 		mask = 0x1 << i;
1238 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1239 			continue;
1240 		mpt_prtc(mpt, " %d", i);
1241 	}
1242 	mpt_prtc(mpt, "\n");
1243 }
1244 
/*
 * Re-read the RAID physical disk configuration page (header plus page 0)
 * for the disk referenced by ioc_disk into mpt_disk->config_page.
 */
static void
mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      IOC_3_PHYS_DISK *ioc_disk)
{
	int rv;

	/* Locate the configuration page header for this disk. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
				 &mpt_disk->config_page.Header,
				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Hdr(%d)\n",
			ioc_disk->PhysDiskNum);
		return;
	}
	/* Fetch the current page contents. */
	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
				   &mpt_disk->config_page.Header,
				   sizeof(mpt_disk->config_page),
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0)
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Page(%d)\n",
			ioc_disk->PhysDiskNum);
	/*
	 * NOTE(review): this byte-swap runs even when the page read above
	 * failed, converting whatever previously occupied the buffer —
	 * confirm whether an early return on failure is intended.
	 */
	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
}
1271 
/*
 * Refresh the cached RAID Volume Page 0 for mpt_vol and update the
 * per-disk membership data derived from it.  If the volume is
 * resyncing, also fetch the resync progress indicator structure.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Data is about to be re-fetched; clear the up-to-date marker. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/* IM (mirror) maps are 1-based; normalize to 0-based. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_disk->member_number--;
		}
	}

	/* Progress data is only meaningful while a resync is running. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		/* Cache the progress indicator for later reporting. */
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1350 
1351 /*
1352  * Update in-core information about RAID support.  We update any entries
1353  * that didn't previously exists or have been marked as needing to
1354  * be updated by our event handler.  Interesting changes are displayed
1355  * to the console.
1356  */
1357 static int
1358 mpt_refresh_raid_data(struct mpt_softc *mpt)
1359 {
1360 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1361 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1362 	IOC_3_PHYS_DISK *ioc_disk;
1363 	IOC_3_PHYS_DISK *ioc_last_disk;
1364 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1365 	size_t len;
1366 	int rv;
1367 	int i;
1368 	u_int nonopt_volumes;
1369 
1370 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1371 		return (0);
1372 	}
1373 
1374 	/*
1375 	 * Mark all items as unreferenced by the configuration.
1376 	 * This allows us to find, report, and discard stale
1377 	 * entries.
1378 	 */
1379 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1380 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1381 	}
1382 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1383 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1384 	}
1385 
1386 	/*
1387 	 * Get Physical Disk information.
1388 	 */
1389 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1390 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1391 				   &mpt->ioc_page3->Header, len,
1392 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1393 	if (rv) {
1394 		mpt_prt(mpt,
1395 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1396 		return (-1);
1397 	}
1398 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1399 
1400 	ioc_disk = mpt->ioc_page3->PhysDisk;
1401 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1402 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1403 		struct mpt_raid_disk *mpt_disk;
1404 
1405 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1406 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1407 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1408 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1409 
1410 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1411 
1412 		}
1413 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1414 		mpt->raid_rescan++;
1415 	}
1416 
1417 	/*
1418 	 * Refresh volume data.
1419 	 */
1420 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1421 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1422 				   &mpt->ioc_page2->Header, len,
1423 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1424 	if (rv) {
1425 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1426 			"Failed to read IOC Page 2\n");
1427 		return (-1);
1428 	}
1429 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1430 
1431 	ioc_vol = mpt->ioc_page2->RaidVolume;
1432 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1433 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1434 		struct mpt_raid_volume *mpt_vol;
1435 
1436 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1437 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1438 		vol_pg = mpt_vol->config_page;
1439 		if (vol_pg == NULL)
1440 			continue;
1441 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1442 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1443 		 || (vol_pg->VolumeStatus.Flags
1444 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1445 
1446 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1447 		}
1448 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1449 	}
1450 
1451 	nonopt_volumes = 0;
1452 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1453 		struct mpt_raid_volume *mpt_vol;
1454 		uint64_t total;
1455 		uint64_t left;
1456 		int m;
1457 		u_int prio;
1458 
1459 		mpt_vol = &mpt->raid_volumes[i];
1460 
1461 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1462 			continue;
1463 		}
1464 
1465 		vol_pg = mpt_vol->config_page;
1466 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1467 		 == MPT_RVF_ANNOUNCED) {
1468 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1469 			mpt_vol->flags = 0;
1470 			continue;
1471 		}
1472 
1473 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1474 			mpt_announce_vol(mpt, mpt_vol);
1475 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1476 		}
1477 
1478 		if (vol_pg->VolumeStatus.State !=
1479 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1480 			nonopt_volumes++;
1481 
1482 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1483 			continue;
1484 
1485 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1486 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1487 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1488 		mpt_verify_mwce(mpt, mpt_vol);
1489 
1490 		if (vol_pg->VolumeStatus.Flags == 0) {
1491 			continue;
1492 		}
1493 
1494 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1495 		for (m = 1; m <= 0x80; m <<= 1) {
1496 			switch (vol_pg->VolumeStatus.Flags & m) {
1497 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1498 				mpt_prtc(mpt, " Enabled");
1499 				break;
1500 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1501 				mpt_prtc(mpt, " Quiesced");
1502 				break;
1503 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1504 				mpt_prtc(mpt, " Re-Syncing");
1505 				break;
1506 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1507 				mpt_prtc(mpt, " Inactive");
1508 				break;
1509 			default:
1510 				break;
1511 			}
1512 		}
1513 		mpt_prtc(mpt, " )\n");
1514 
1515 		if ((vol_pg->VolumeStatus.Flags
1516 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1517 			continue;
1518 
1519 		mpt_verify_resync_rate(mpt, mpt_vol);
1520 
1521 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1522 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1523 		if (vol_pg->ResyncRate != 0) {
1524 
1525 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1526 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1527 			    prio / 1000, prio % 1000);
1528 		} else {
1529 			prio = vol_pg->VolumeSettings.Settings
1530 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1531 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1532 			    prio ? "High" : "Low");
1533 		}
1534 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1535 			    "blocks remaining\n", (uintmax_t)left,
1536 			    (uintmax_t)total);
1537 
1538 		/* Periodically report on sync progress. */
1539 		mpt_schedule_raid_refresh(mpt);
1540 	}
1541 
1542 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1543 		struct mpt_raid_disk *mpt_disk;
1544 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1545 		int m;
1546 
1547 		mpt_disk = &mpt->raid_disks[i];
1548 		disk_pg = &mpt_disk->config_page;
1549 
1550 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1551 			continue;
1552 
1553 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1554 		 == MPT_RDF_ANNOUNCED) {
1555 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1556 			mpt_disk->flags = 0;
1557 			mpt->raid_rescan++;
1558 			continue;
1559 		}
1560 
1561 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1562 
1563 			mpt_announce_disk(mpt, mpt_disk);
1564 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1565 		}
1566 
1567 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1568 			continue;
1569 
1570 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1571 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1572 		if (disk_pg->PhysDiskStatus.Flags == 0)
1573 			continue;
1574 
1575 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1576 		for (m = 1; m <= 0x80; m <<= 1) {
1577 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1578 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1579 				mpt_prtc(mpt, " Out-Of-Sync");
1580 				break;
1581 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1582 				mpt_prtc(mpt, " Quiesced");
1583 				break;
1584 			default:
1585 				break;
1586 			}
1587 		}
1588 		mpt_prtc(mpt, " )\n");
1589 	}
1590 
1591 	mpt->raid_nonopt_volumes = nonopt_volumes;
1592 	return (0);
1593 }
1594 
/*
 * Callout handler: wake the RAID thread under the softc lock.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = (struct mpt_softc *)arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1605 
/*
 * Arm the RAID status timer so mpt_raid_timer fires after
 * MPT_RAID_SYNC_REPORT_INTERVAL, waking the RAID thread to refresh
 * RAID data and report sync progress.
 */
static void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{

	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1613 
1614 void
1615 mpt_raid_free_mem(struct mpt_softc *mpt)
1616 {
1617 
1618 	if (mpt->raid_volumes) {
1619 		struct mpt_raid_volume *mpt_raid;
1620 		int i;
1621 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1622 			mpt_raid = &mpt->raid_volumes[i];
1623 			if (mpt_raid->config_page) {
1624 				kfree(mpt_raid->config_page, M_DEVBUF);
1625 				mpt_raid->config_page = NULL;
1626 			}
1627 		}
1628 		kfree(mpt->raid_volumes, M_DEVBUF);
1629 		mpt->raid_volumes = NULL;
1630 	}
1631 	if (mpt->raid_disks) {
1632 		kfree(mpt->raid_disks, M_DEVBUF);
1633 		mpt->raid_disks = NULL;
1634 	}
1635 	if (mpt->ioc_page2) {
1636 		kfree(mpt->ioc_page2, M_DEVBUF);
1637 		mpt->ioc_page2 = NULL;
1638 	}
1639 	if (mpt->ioc_page3) {
1640 		kfree(mpt->ioc_page3, M_DEVBUF);
1641 		mpt->ioc_page3 = NULL;
1642 	}
1643 	mpt->raid_max_volumes =  0;
1644 	mpt->raid_max_disks =  0;
1645 }
1646 
1647 static int
1648 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1649 {
1650 	struct mpt_raid_volume *mpt_vol;
1651 
1652 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1653 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1654 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1655 		return (EINVAL);
1656 
1657 	MPT_LOCK(mpt);
1658 	mpt->raid_resync_rate = rate;
1659 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1660 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1661 			continue;
1662 		}
1663 		mpt_verify_resync_rate(mpt, mpt_vol);
1664 	}
1665 	MPT_UNLOCK(mpt);
1666 	return (0);
1667 }
1668 
1669 static int
1670 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1671 {
1672 	struct mpt_raid_volume *mpt_vol;
1673 
1674 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1675 		return (EINVAL);
1676 
1677 	MPT_LOCK(mpt);
1678 	mpt->raid_queue_depth = vol_queue_depth;
1679 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1680 		struct cam_path *path;
1681 		int error;
1682 
1683 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1684 			continue;
1685 
1686 		mpt->raid_rescan = 0;
1687 
1688 		error = xpt_create_path(&path, xpt_periph,
1689 					cam_sim_path(mpt->sim),
1690 					mpt_vol->config_page->VolumeID,
1691 					/*lun*/0);
1692 		if (error != CAM_REQ_CMP) {
1693 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1694 			continue;
1695 		}
1696 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1697 		xpt_free_path(path);
1698 	}
1699 	MPT_UNLOCK(mpt);
1700 	return (0);
1701 }
1702 
1703 static int
1704 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1705 {
1706 	struct mpt_raid_volume *mpt_vol;
1707 	int force_full_resync;
1708 
1709 	MPT_LOCK(mpt);
1710 	if (mwce == mpt->raid_mwce_setting) {
1711 		MPT_UNLOCK(mpt);
1712 		return (0);
1713 	}
1714 
1715 	/*
1716 	 * Catch MWCE being left on due to a failed shutdown.  Since
1717 	 * sysctls cannot be set by the loader, we treat the first
1718 	 * setting of this varible specially and force a full volume
1719 	 * resync if MWCE is enabled and a resync is in progress.
1720 	 */
1721 	force_full_resync = 0;
1722 	if (mpt->raid_mwce_set == 0
1723 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1724 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1725 		force_full_resync = 1;
1726 
1727 	mpt->raid_mwce_setting = mwce;
1728 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1729 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1730 		int resyncing;
1731 		int mwce;
1732 
1733 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1734 			continue;
1735 
1736 		vol_pg = mpt_vol->config_page;
1737 		resyncing = vol_pg->VolumeStatus.Flags
1738 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1739 		mwce = vol_pg->VolumeSettings.Settings
1740 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1741 		if (force_full_resync && resyncing && mwce) {
1742 
1743 			/*
1744 			 * XXX disable/enable volume should force a resync,
1745 			 *     but we'll need to queice, drain, and restart
1746 			 *     I/O to do that.
1747 			 */
1748 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1749 				    "detected.  Suggest full resync.\n");
1750 		}
1751 		mpt_verify_mwce(mpt, mpt_vol);
1752 	}
1753 	mpt->raid_mwce_set = 1;
1754 	MPT_UNLOCK(mpt);
1755 	return (0);
1756 }
1757 
/*
 * String values accepted/reported by the vol_member_wce sysctl.
 * NOTE(review): the index order appears to line up with the
 * mpt_raid_mwce_t enumeration (the matching index is passed straight
 * to mpt_raid_set_vol_mwce) — confirm against the header defining
 * mpt_raid_mwce_t before reordering.
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1765 
/*
 * Sysctl handler for vol_member_wce: reports the current member-disk
 * write-cache policy as a string and accepts one of the
 * mpt_vol_mwce_strs values to change it.
 */
static int
mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
{
	char inbuf[20];
	struct mpt_softc *mpt;
	const char *str;
	int error;
	u_int size;
	u_int i;

	mpt = (struct mpt_softc *)arg1;
	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
	/* Emit the current setting, including its NUL terminator. */
	error = SYSCTL_OUT(req, str, strlen(str) + 1);
	if (error || !req->newptr) {
		return (error);
	}

	/* Reject input too long to be one of our keywords. */
	size = req->newlen - req->newidx;
	if (size >= sizeof(inbuf)) {
		return (EINVAL);
	}

	error = SYSCTL_IN(req, inbuf, size);
	if (error) {
		return (error);
	}
	inbuf[size] = '\0';
	/* Map the keyword onto its index, which is the new policy value. */
	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
			return (mpt_raid_set_vol_mwce(mpt, i));
		}
	}
	return (EINVAL);
}
1800 
1801 static int
1802 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1803 {
1804 	struct mpt_softc *mpt;
1805 	u_int raid_resync_rate;
1806 	int error;
1807 
1808 	mpt = (struct mpt_softc *)arg1;
1809 	raid_resync_rate = mpt->raid_resync_rate;
1810 
1811 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1812 	if (error || !req->newptr) {
1813 		return error;
1814 	}
1815 
1816 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1817 }
1818 
1819 static int
1820 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1821 {
1822 	struct mpt_softc *mpt;
1823 	u_int raid_queue_depth;
1824 	int error;
1825 
1826 	mpt = (struct mpt_softc *)arg1;
1827 	raid_queue_depth = mpt->raid_queue_depth;
1828 
1829 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1830 	if (error || !req->newptr) {
1831 		return error;
1832 	}
1833 
1834 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1835 }
1836 
/*
 * Register the RAID tunables and the read-only nonoptimal-volume count
 * under the device's sysctl tree.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1862