/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_user.c,v 1.5 2011/03/06 12:48:15 marius Exp $
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/mpt_ioctl.h>

#include <dev/disk/mpt/mpt.h>

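/*
 * This personality exposes a /dev/mpt%d character device so userland tools
 * can read and write IOC configuration pages and issue RAID actions.  The
 * ioctl definitions live in <sys/mpt_ioctl.h>.
 */

/*
 * RAID action results are stashed in the request buffer just past the
 * request frame (see mpt_user_reply_handler() and mpt_user_raid_action()).
 */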
struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

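/*
 * DMA bookkeeping for a kernel staging buffer used to move configuration
 * pages and RAID action data between userland and the controller.
 */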
struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
	void		*vaddr;
};

static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

static struct dev_ops mpt_ops = {
	{ "mpt", 0, 0 },
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}

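/*
 * Register our reply handler with the core driver and create the
 * /dev/mpt%d device node.
 */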
int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}

int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}

void
mpt_user_ready(struct mpt_softc *mpt)
{
}

int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}

void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}

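/*
 * Tear down the device node and deregister the user reply handler.
 */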
void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}

static int
mpt_open(struct dev_open_args *ap)
{

	return (0);
}

static int
mpt_close(struct dev_close_args *ap)
{

	return (0);
}

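/*
 * Allocate a DMA-safe staging buffer (capped at 16MB) for a user request.
 */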
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}

static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}

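/*
 * Issue a CONFIG page-header action and copy the returned header back
 * into the user's request structure.
 */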
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}

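/*
 * Read the current version of a configuration page into the staging
 * buffer, using the page header supplied by the caller.
 */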
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

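/*
 * Fetch the extended configuration page header for the requested page.
 */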
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}

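/*
 * Read the current version of an extended configuration page into the
 * staging buffer.
 */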
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

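/*
 * Write a changeable (or persistent) configuration page supplied by the
 * caller back to the controller.
 */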
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

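/*
 * Completion handler for user-initiated requests: stash the RAID action
 * result just past the request frame and wake up (or clean up after) the
 * waiting thread.
 */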
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
	struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
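	/* Build a single simple SGE for the optional action data buffer. */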
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

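/*
 * Pointer conversion helpers for the 32-bit compatibility ioctls.
 */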
#ifdef __x86_64__
#define	PTRIN(p)		((void *)(uintptr_t)(p))
#define PTROUT(v)		((u_int32_t)(uintptr_t)(v))
#endif

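/*
 * ioctl entry point: convert 32-bit compat requests to the native layout,
 * stage any user buffer in a DMA-able page, and dispatch to the helpers
 * above.
 */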
static int
mpt_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t arg = ap->a_data;
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __x86_64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __x86_64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __x86_64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
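		/*
		 * The caller supplies the page header obtained via
		 * MPTIO_READ_CFG_HEADER; only the header is copied in and
		 * the controller fills in the rest of the page.
		 */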
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __x86_64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __x86_64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __x86_64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}