xref: /dragonfly/sys/dev/raid/hptiop/hptiop.c (revision 3d33658b)
1 /*
2  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
3  * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.15 2012/10/25 17:29:11 delphij Exp $
27  */
28 
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/cons.h>
32 #include <sys/time.h>
33 #include <sys/systm.h>
34 
35 #include <sys/stat.h>
36 #include <sys/malloc.h>
37 #include <sys/conf.h>
38 #include <sys/kernel.h>
39 
40 #include <sys/kthread.h>
41 #include <sys/lock.h>
42 #include <sys/module.h>
43 
44 #include <sys/eventhandler.h>
45 #include <sys/bus.h>
46 #include <sys/taskqueue.h>
47 #include <sys/device.h>
48 #include <sys/mplock2.h>
49 
50 #include <machine/stdarg.h>
51 #include <sys/rman.h>
52 
53 #include <vm/vm.h>
54 #include <vm/pmap.h>
55 
56 #include <bus/pci/pcireg.h>
57 #include <bus/pci/pcivar.h>
58 
59 #include <bus/cam/cam.h>
60 #include <bus/cam/cam_ccb.h>
61 #include <bus/cam/cam_sim.h>
62 #include <bus/cam/cam_xpt_periph.h>
63 #include <bus/cam/cam_xpt_sim.h>
64 #include <bus/cam/cam_debug.h>
65 #include <bus/cam/cam_periph.h>
66 #include <bus/cam/scsi/scsi_all.h>
67 #include <bus/cam/scsi/scsi_message.h>
68 
69 #include <dev/raid/hptiop/hptiop.h>
70 
71 static const char driver_name[] = "hptiop";
72 static const char driver_version[] = "v1.8";
73 
74 static devclass_t hptiop_devclass;
75 
76 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
77 				u_int32_t msg, u_int32_t millisec);
78 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
79 							u_int32_t req);
80 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
81 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
82 							u_int32_t req);
83 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
84 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
85 				struct hpt_iop_ioctl_param *pParams);
86 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
87 				struct hpt_iop_ioctl_param *pParams);
88 static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
89 				struct hpt_iop_ioctl_param *pParams);
90 static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
91 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
92 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
93 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
94 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
95 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
96 				struct hpt_iop_request_get_config *config);
97 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
98 				struct hpt_iop_request_get_config *config);
99 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
100 				struct hpt_iop_request_get_config *config);
101 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
102 				struct hpt_iop_request_set_config *config);
103 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
104 				struct hpt_iop_request_set_config *config);
105 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
106 				struct hpt_iop_request_set_config *config);
107 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
108 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
109 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
110 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
111 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
112 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
113 			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
114 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
115 				struct hpt_iop_request_ioctl_command *req,
116 				struct hpt_iop_ioctl_param *pParams);
117 static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
118 				struct hpt_iop_request_ioctl_command *req,
119 				struct hpt_iop_ioctl_param *pParams);
120 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
121 				struct hpt_iop_srb *srb,
122 				bus_dma_segment_t *segs, int nsegs);
123 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
124 				struct hpt_iop_srb *srb,
125 				bus_dma_segment_t *segs, int nsegs);
126 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
127 				struct hpt_iop_srb *srb,
128 				bus_dma_segment_t *segs, int nsegs);
129 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
130 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
131 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
132 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
133 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
134 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
135 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
136 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
137 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
138 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
139 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
140 static int  hptiop_probe(device_t dev);
141 static int  hptiop_attach(device_t dev);
142 static int  hptiop_detach(device_t dev);
143 static int  hptiop_shutdown(device_t dev);
144 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
145 static void hptiop_poll(struct cam_sim *sim);
146 static void hptiop_async(void *callback_arg, u_int32_t code,
147 					struct cam_path *path, void *arg);
148 static void hptiop_pci_intr(void *arg);
149 static void hptiop_release_resource(struct hpt_iop_hba *hba);
150 static void hptiop_reset_adapter(void *argv);
151 static d_open_t hptiop_open;
152 static d_close_t hptiop_close;
153 static d_ioctl_t hptiop_ioctl;
154 
155 static struct dev_ops hptiop_ops = {
156 	{ driver_name, 0, 0 },
157 	.d_open = hptiop_open,
158 	.d_close = hptiop_close,
159 	.d_ioctl = hptiop_ioctl,
160 };
161 
162 #define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
163 
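/*
 * Register access helpers: each controller family (ITL, MV, MV "Frey")
 * exposes its message unit through a different BAR layout, so these
 * macros wrap bus_space_read_4()/bus_space_write_4() with the offset of
 * the named field in the matching register structure.
 */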
164 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
165 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
166 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
167 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
168 
169 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
170 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
171 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
172 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
173 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
174 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
175 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
176 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
177 
178 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
179 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
180 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
181 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
182 
183 static int hptiop_open(struct dev_open_args *ap)
184 {
185 	cdev_t dev = ap->a_head.a_dev;
186 	struct hpt_iop_hba *hba = hba_from_dev(dev);
187 
188 	if (hba==NULL)
189 		return ENXIO;
190 	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
191 		return EBUSY;
192 	hba->flag |= HPT_IOCTL_FLAG_OPEN;
193 	return 0;
194 }
195 
196 static int hptiop_close(struct dev_close_args *ap)
197 {
198 	cdev_t dev = ap->a_head.a_dev;
199 	struct hpt_iop_hba *hba = hba_from_dev(dev);
200 	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
201 	return 0;
202 }
203 
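/*
 * Character device ioctl entry point: HPT_DO_IOCONTROL is forwarded to
 * the family-specific do_ioctl handler and HPT_SCAN_BUS triggers a CAM
 * bus rescan; both run under the MP lock.
 */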
204 static int hptiop_ioctl(struct dev_ioctl_args *ap)
205 {
206 	cdev_t dev = ap->a_head.a_dev;
207 	u_long cmd = ap->a_cmd;
208 	caddr_t data = ap->a_data;
209 	int ret = EFAULT;
210 	struct hpt_iop_hba *hba = hba_from_dev(dev);
211 
212 	get_mplock();
213 
214 	switch (cmd) {
215 	case HPT_DO_IOCONTROL:
216 		ret = hba->ops->do_ioctl(hba,
217 				(struct hpt_iop_ioctl_param *)data);
218 		break;
219 	case HPT_SCAN_BUS:
220 		ret = hptiop_rescan_bus(hba);
221 		break;
222 	}
223 
224 	rel_mplock();
225 
226 	return ret;
227 }
228 
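/*
 * MV outbound queue: each entry is a 64-bit tag read as two 32-bit
 * words.  The tail index is advanced (wrapping at MVIOP_QUEUE_LEN) and
 * written back to the controller; 0 means the queue was empty.
 */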
229 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
230 {
231 	u_int64_t p;
232 	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
233 	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
234 
235 	if (outbound_tail != outbound_head) {
236 		bus_space_read_region_4(hba->bar2t, hba->bar2h,
237 			offsetof(struct hpt_iopmu_mv,
238 				outbound_q[outbound_tail]),
239 			(u_int32_t *)&p, 2);
240 
241 		outbound_tail++;
242 
243 		if (outbound_tail == MVIOP_QUEUE_LEN)
244 			outbound_tail = 0;
245 
246 		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
247 		return p;
248 	} else
249 		return 0;
250 }
251 
252 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
253 {
254 	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
255 	u_int32_t head = inbound_head + 1;
256 
257 	if (head == MVIOP_QUEUE_LEN)
258 		head = 0;
259 
260 	bus_space_write_region_4(hba->bar2t, hba->bar2h,
261 			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
262 			(u_int32_t *)&p, 2);
263 	BUS_SPACE_WRT4_MV2(inbound_head, head);
264 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
265 }
266 
267 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
268 {
269 	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
270 	BUS_SPACE_RD4_ITL(outbound_intstatus);
271 }
272 
273 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
274 {
275 
276 	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
277 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
278 
279 	BUS_SPACE_RD4_MV0(outbound_intmask);
280 }
281 
282 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
283 {
284 	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
285 	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
286 }
287 
288 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
289 {
290 	u_int32_t req=0;
291 	int i;
292 
293 	for (i = 0; i < millisec; i++) {
294 		req = BUS_SPACE_RD4_ITL(inbound_queue);
295 		if (req != IOPMU_QUEUE_EMPTY)
296 			break;
297 		DELAY(1000);
298 	}
299 
300 	if (req!=IOPMU_QUEUE_EMPTY) {
301 		BUS_SPACE_WRT4_ITL(outbound_queue, req);
302 		BUS_SPACE_RD4_ITL(outbound_intstatus);
303 		return 0;
304 	}
305 
306 	return -1;
307 }
308 
309 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
310 {
311 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
312 		return -1;
313 
314 	return 0;
315 }
316 
317 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
318 							u_int32_t millisec)
319 {
320 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
321 		return -1;
322 
323 	return 0;
324 }
325 
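/*
 * ITL completion handling.  A tag with IOPMU_QUEUE_MASK_HOST_BITS set
 * refers to a host-allocated SRB (indexed after masking off the address
 * and result bits); otherwise the tag is an offset into the IOP's own
 * request area and the header is read through bus_space.  The IOP
 * result code is then translated into a CAM status, with sense data
 * copied out on check conditions.
 */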
326 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
327 							u_int32_t index)
328 {
329 	struct hpt_iop_srb *srb;
330 	struct hpt_iop_request_scsi_command *req=NULL;
331 	union ccb *ccb;
332 	u_int8_t *cdb;
333 	u_int32_t result, temp, dxfer;
334 	u_int64_t temp64;
335 
336 	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
337 		if (hba->firmware_version > 0x01020000 ||
338 			hba->interface_version > 0x01020000) {
339 			srb = hba->srb[index & ~(u_int32_t)
340 				(IOPMU_QUEUE_ADDR_HOST_BIT
341 				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
342 			req = (struct hpt_iop_request_scsi_command *)srb;
343 			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
344 				result = IOP_RESULT_SUCCESS;
345 			else
346 				result = req->header.result;
347 		} else {
348 			srb = hba->srb[index &
349 				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
350 			req = (struct hpt_iop_request_scsi_command *)srb;
351 			result = req->header.result;
352 		}
353 		dxfer = req->dataxfer_length;
354 		goto srb_complete;
355 	}
356 
357 	/*iop req*/
358 	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
359 		offsetof(struct hpt_iop_request_header, type));
360 	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
361 		offsetof(struct hpt_iop_request_header, result));
362 	switch(temp) {
363 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
364 	{
365 		temp64 = 0;
366 		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
367 			offsetof(struct hpt_iop_request_header, context),
368 			(u_int32_t *)&temp64, 2);
369 		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
370 		break;
371 	}
372 
373 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
374 		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
375 			offsetof(struct hpt_iop_request_header, context),
376 			(u_int32_t *)&temp64, 2);
377 		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
378 		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
379 				index + offsetof(struct hpt_iop_request_scsi_command,
380 				dataxfer_length));
381 srb_complete:
382 		ccb = (union ccb *)srb->ccb;
383 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
384 			cdb = ccb->csio.cdb_io.cdb_ptr;
385 		else
386 			cdb = ccb->csio.cdb_io.cdb_bytes;
387 
388 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
389 			ccb->ccb_h.status = CAM_REQ_CMP;
390 			goto scsi_done;
391 		}
392 
393 		switch (result) {
394 		case IOP_RESULT_SUCCESS:
395 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
396 			case CAM_DIR_IN:
397 				bus_dmamap_sync(hba->io_dmat,
398 					srb->dma_map, BUS_DMASYNC_POSTREAD);
399 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
400 				break;
401 			case CAM_DIR_OUT:
402 				bus_dmamap_sync(hba->io_dmat,
403 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
404 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
405 				break;
406 			}
407 
408 			ccb->ccb_h.status = CAM_REQ_CMP;
409 			break;
410 
411 		case IOP_RESULT_BAD_TARGET:
412 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
413 			break;
414 		case IOP_RESULT_BUSY:
415 			ccb->ccb_h.status = CAM_BUSY;
416 			break;
417 		case IOP_RESULT_INVALID_REQUEST:
418 			ccb->ccb_h.status = CAM_REQ_INVALID;
419 			break;
420 		case IOP_RESULT_FAIL:
421 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
422 			break;
423 		case IOP_RESULT_RESET:
424 			ccb->ccb_h.status = CAM_BUSY;
425 			break;
426 		case IOP_RESULT_CHECK_CONDITION:
427 			memset(&ccb->csio.sense_data, 0,
428 			    sizeof(ccb->csio.sense_data));
429 			if (dxfer < ccb->csio.sense_len)
430 				ccb->csio.sense_resid = ccb->csio.sense_len -
431 				    dxfer;
432 			else
433 				ccb->csio.sense_resid = 0;
434 			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
435 				bus_space_read_region_1(hba->bar0t, hba->bar0h,
436 					index + offsetof(struct hpt_iop_request_scsi_command,
437 					sg_list), (u_int8_t *)&ccb->csio.sense_data,
438 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
439 			} else {
440 				memcpy(&ccb->csio.sense_data, &req->sg_list,
441 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
442 			}
443 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
444 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
445 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
446 			break;
447 		default:
448 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
449 			break;
450 		}
451 scsi_done:
452 		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
453 			BUS_SPACE_WRT4_ITL(outbound_queue, index);
454 
455 		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
456 
457 		hptiop_free_srb(hba, srb);
458 		xpt_done(ccb);
459 		break;
460 	}
461 }
462 
463 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
464 {
465 	u_int32_t req, temp;
466 
467 	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
468 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
469 			hptiop_request_callback_itl(hba, req);
470 		else {
471 			temp = bus_space_read_4(hba->bar0t,
472 					hba->bar0h,req +
473 					offsetof(struct hpt_iop_request_header,
474 						flags));
475 			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
476 				u_int64_t temp64;
477 				bus_space_read_region_4(hba->bar0t,
478 					hba->bar0h,req +
479 					offsetof(struct hpt_iop_request_header,
480 						context),
481 					(u_int32_t *)&temp64, 2);
482 				if (temp64) {
483 					hptiop_request_callback_itl(hba, req);
484 				} else {
485 					temp64 = 1;
486 					bus_space_write_region_4(hba->bar0t,
487 						hba->bar0h,req +
488 						offsetof(struct hpt_iop_request_header,
489 							context),
490 						(u_int32_t *)&temp64, 2);
491 				}
492 			} else
493 				hptiop_request_callback_itl(hba, req);
494 		}
495 	}
496 }
497 
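/*
 * ITL interrupt handling: outbound message interrupts are acknowledged
 * and passed to hptiop_os_message_callback(), and post-queue interrupts
 * drain the outbound queue, completing each request found there.
 */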
498 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
499 {
500 	u_int32_t status;
501 	int ret = 0;
502 
503 	status = BUS_SPACE_RD4_ITL(outbound_intstatus);
504 
505 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
506 		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
507 		KdPrint(("hptiop: received outbound msg %x\n", msg));
508 		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
509 		hptiop_os_message_callback(hba, msg);
510 		ret = 1;
511 	}
512 
513 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
514 		hptiop_drain_outbound_queue_itl(hba);
515 		ret = 1;
516 	}
517 
518 	return ret;
519 }
520 
521 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
522 							u_int64_t _tag)
523 {
524 	u_int32_t context = (u_int32_t)_tag;
525 
526 	if (context & MVIOP_CMD_TYPE_SCSI) {
527 		struct hpt_iop_srb *srb;
528 		struct hpt_iop_request_scsi_command *req;
529 		union ccb *ccb;
530 		u_int8_t *cdb;
531 
532 		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
533 		req = (struct hpt_iop_request_scsi_command *)srb;
534 		ccb = (union ccb *)srb->ccb;
535 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
536 			cdb = ccb->csio.cdb_io.cdb_ptr;
537 		else
538 			cdb = ccb->csio.cdb_io.cdb_bytes;
539 
540 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
541 			ccb->ccb_h.status = CAM_REQ_CMP;
542 			goto scsi_done;
543 		}
544 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
545 			req->header.result = IOP_RESULT_SUCCESS;
546 
547 		switch (req->header.result) {
548 		case IOP_RESULT_SUCCESS:
549 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
550 			case CAM_DIR_IN:
551 				bus_dmamap_sync(hba->io_dmat,
552 					srb->dma_map, BUS_DMASYNC_POSTREAD);
553 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
554 				break;
555 			case CAM_DIR_OUT:
556 				bus_dmamap_sync(hba->io_dmat,
557 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
558 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
559 				break;
560 			}
561 			ccb->ccb_h.status = CAM_REQ_CMP;
562 			break;
563 		case IOP_RESULT_BAD_TARGET:
564 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
565 			break;
566 		case IOP_RESULT_BUSY:
567 			ccb->ccb_h.status = CAM_BUSY;
568 			break;
569 		case IOP_RESULT_INVALID_REQUEST:
570 			ccb->ccb_h.status = CAM_REQ_INVALID;
571 			break;
572 		case IOP_RESULT_FAIL:
573 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
574 			break;
575 		case IOP_RESULT_RESET:
576 			ccb->ccb_h.status = CAM_BUSY;
577 			break;
578 		case IOP_RESULT_CHECK_CONDITION:
579 			memset(&ccb->csio.sense_data, 0,
580 			    sizeof(ccb->csio.sense_data));
581 			if (req->dataxfer_length < ccb->csio.sense_len)
582 				ccb->csio.sense_resid = ccb->csio.sense_len -
583 				    req->dataxfer_length;
584 			else
585 				ccb->csio.sense_resid = 0;
586 			memcpy(&ccb->csio.sense_data, &req->sg_list,
587 				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
588 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
589 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
590 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
591 			break;
592 		default:
593 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
594 			break;
595 		}
596 scsi_done:
597 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
598 
599 		hptiop_free_srb(hba, srb);
600 		xpt_done(ccb);
601 	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
602 		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
603 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
604 			hba->config_done = 1;
605 		else
606 			hba->config_done = -1;
607 		wakeup(req);
608 	} else if (context &
609 			(MVIOP_CMD_TYPE_SET_CONFIG |
610 				MVIOP_CMD_TYPE_GET_CONFIG))
611 		hba->config_done = 1;
612 	else {
613 		device_printf(hba->pcidev, "wrong callback type\n");
614 	}
615 }
616 
617 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
618 				u_int32_t _tag)
619 {
620 	u_int32_t req_type = _tag & 0xf;
621 
622 	struct hpt_iop_srb *srb;
623 	struct hpt_iop_request_scsi_command *req;
624 	union ccb *ccb;
625 	u_int8_t *cdb;
626 
627 	switch (req_type) {
628 	case IOP_REQUEST_TYPE_GET_CONFIG:
629 	case IOP_REQUEST_TYPE_SET_CONFIG:
630 		hba->config_done = 1;
631 		break;
632 
633 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
634 		srb = hba->srb[(_tag >> 4) & 0xff];
635 		req = (struct hpt_iop_request_scsi_command *)srb;
636 
637 		ccb = (union ccb *)srb->ccb;
638 
639 		callout_stop(ccb->ccb_h.timeout_ch);
640 
641 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
642 			cdb = ccb->csio.cdb_io.cdb_ptr;
643 		else
644 			cdb = ccb->csio.cdb_io.cdb_bytes;
645 
646 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
647 			ccb->ccb_h.status = CAM_REQ_CMP;
648 			goto scsi_done;
649 		}
650 
651 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
652 			req->header.result = IOP_RESULT_SUCCESS;
653 
654 		switch (req->header.result) {
655 		case IOP_RESULT_SUCCESS:
656 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
657 			case CAM_DIR_IN:
658 				bus_dmamap_sync(hba->io_dmat,
659 						srb->dma_map, BUS_DMASYNC_POSTREAD);
660 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
661 				break;
662 			case CAM_DIR_OUT:
663 				bus_dmamap_sync(hba->io_dmat,
664 						srb->dma_map, BUS_DMASYNC_POSTWRITE);
665 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
666 				break;
667 			}
668 			ccb->ccb_h.status = CAM_REQ_CMP;
669 			break;
670 		case IOP_RESULT_BAD_TARGET:
671 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
672 			break;
673 		case IOP_RESULT_BUSY:
674 			ccb->ccb_h.status = CAM_BUSY;
675 			break;
676 		case IOP_RESULT_INVALID_REQUEST:
677 			ccb->ccb_h.status = CAM_REQ_INVALID;
678 			break;
679 		case IOP_RESULT_FAIL:
680 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
681 			break;
682 		case IOP_RESULT_RESET:
683 			ccb->ccb_h.status = CAM_BUSY;
684 			break;
685 		case IOP_RESULT_CHECK_CONDITION:
686 			memset(&ccb->csio.sense_data, 0,
687 			       sizeof(ccb->csio.sense_data));
688 			if (req->dataxfer_length < ccb->csio.sense_len)
689 				ccb->csio.sense_resid = ccb->csio.sense_len -
690 				req->dataxfer_length;
691 			else
692 				ccb->csio.sense_resid = 0;
693 			memcpy(&ccb->csio.sense_data, &req->sg_list,
694 			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
695 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
696 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
697 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
698 			break;
699 		default:
700 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
701 			break;
702 		}
703 scsi_done:
704 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
705 
706 		hptiop_free_srb(hba, srb);
707 		xpt_done(ccb);
708 		break;
709 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
710 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
711 			hba->config_done = 1;
712 		else
713 			hba->config_done = -1;
714 		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
715 		break;
716 	default:
717 		device_printf(hba->pcidev, "wrong callback type\n");
718 		break;
719 	}
720 }
721 
722 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
723 {
724 	u_int64_t req;
725 
726 	while ((req = hptiop_mv_outbound_read(hba))) {
727 		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
728 			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
729 				hptiop_request_callback_mv(hba, req);
730 			}
731 		}
732 	}
733 }
734 
735 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
736 {
737 	u_int32_t status;
738 	int ret = 0;
739 
740 	status = BUS_SPACE_RD4_MV0(outbound_doorbell);
741 
742 	if (status)
743 		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
744 
745 	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
746 		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
747 		KdPrint(("hptiop: received outbound msg %x\n", msg));
748 		hptiop_os_message_callback(hba, msg);
749 		ret = 1;
750 	}
751 
752 	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
753 		hptiop_drain_outbound_queue_mv(hba);
754 		ret = 1;
755 	}
756 
757 	return ret;
758 }
759 
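/*
 * MV "Frey" interrupt handling: the doorbell is acknowledged and any
 * firmware message delivered, then the outbound list is drained by
 * following the shadow completion index (*outlist_cptr) until the
 * software read pointer catches up.  Once the adapter is initialized,
 * interrupts are masked for the duration of the handler.
 */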
760 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
761 {
762 	u_int32_t status, _tag, cptr;
763 	int ret = 0;
764 
765 	if (hba->initialized) {
766 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
767 	}
768 
769 	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
770 	if (status) {
771 		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
772 		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
773 			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
774 			hptiop_os_message_callback(hba, msg);
775 		}
776 		ret = 1;
777 	}
778 
779 	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
780 	if (status) {
781 		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
782 		do {
783 			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
784 			while (hba->u.mvfrey.outlist_rptr != cptr) {
785 				hba->u.mvfrey.outlist_rptr++;
786 				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
787 					hba->u.mvfrey.outlist_rptr = 0;
788 				}
789 
790 				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
791 				hptiop_request_callback_mvfrey(hba, _tag);
792 				ret = 2;
793 			}
794 		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
795 	}
796 
797 	if (hba->initialized) {
798 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
799 	}
800 
801 	return ret;
802 }
803 
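/*
 * Synchronous (polled) requests: the request is posted and the
 * interrupt handler is invoked by hand once per millisecond
 * (DELAY(1000)) until the firmware signals completion or the caller's
 * timeout expires; -1 indicates a timeout.
 */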
804 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
805 					u_int32_t req32, u_int32_t millisec)
806 {
807 	u_int32_t i;
808 	u_int64_t temp64;
809 
810 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
811 	BUS_SPACE_RD4_ITL(outbound_intstatus);
812 
813 	for (i = 0; i < millisec; i++) {
814 		hptiop_intr_itl(hba);
815 		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
816 			offsetof(struct hpt_iop_request_header, context),
817 			(u_int32_t *)&temp64, 2);
818 		if (temp64)
819 			return 0;
820 		DELAY(1000);
821 	}
822 
823 	return -1;
824 }
825 
826 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
827 					void *req, u_int32_t millisec)
828 {
829 	u_int32_t i;
830 	u_int64_t phy_addr;
831 	hba->config_done = 0;
832 
833 	phy_addr = hba->ctlcfgcmd_phy |
834 			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
835 	((struct hpt_iop_request_get_config *)req)->header.flags |=
836 		IOP_REQUEST_FLAG_SYNC_REQUEST |
837 		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
838 	hptiop_mv_inbound_write(phy_addr, hba);
839 	BUS_SPACE_RD4_MV0(outbound_intmask);
840 
841 	for (i = 0; i < millisec; i++) {
842 		hptiop_intr_mv(hba);
843 		if (hba->config_done)
844 			return 0;
845 		DELAY(1000);
846 	}
847 	return -1;
848 }
849 
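/*
 * MV "Frey" inbound list: requests are described by (address, size/4)
 * entries in a host-resident ring.  The write pointer carries a toggle
 * bit (CL_POINTER_TOGGLE) that flips on every wrap, presumably so the
 * firmware can tell producer and consumer laps apart; the updated
 * pointer is written to inbound_write_ptr and read back to flush the
 * posting.
 */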
850 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
851 					void *req, u_int32_t millisec)
852 {
853 	u_int32_t i, index;
854 	u_int64_t phy_addr;
855 	struct hpt_iop_request_header *reqhdr =	(struct hpt_iop_request_header *)req;
856 
857 	hba->config_done = 0;
858 
859 	phy_addr = hba->ctlcfgcmd_phy;
860 	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
861 					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
862 					| IOP_REQUEST_FLAG_ADDR_BITS
863 					| ((phy_addr >> 16) & 0xffff0000);
864 	reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
865 					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
866 
867 	hba->u.mvfrey.inlist_wptr++;
868 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
869 
870 	if (index == hba->u.mvfrey.list_count) {
871 		index = 0;
872 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
873 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
874 	}
875 
876 	hba->u.mvfrey.inlist[index].addr = phy_addr;
877 	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
878 
879 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
880 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
881 
882 	for (i = 0; i < millisec; i++) {
883 		hptiop_intr_mvfrey(hba);
884 		if (hba->config_done)
885 			return 0;
886 		DELAY(1000);
887 	}
888 	return -1;
889 }
890 
891 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
892 					u_int32_t msg, u_int32_t millisec)
893 {
894 	u_int32_t i;
895 
896 	hba->msg_done = 0;
897 	hba->ops->post_msg(hba, msg);
898 
899 	for (i=0; i<millisec; i++) {
900 		hba->ops->iop_intr(hba);
901 		if (hba->msg_done)
902 			break;
903 		DELAY(1000);
904 	}
905 
906 	return hba->msg_done? 0 : -1;
907 }
908 
909 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
910 				struct hpt_iop_request_get_config * config)
911 {
912 	u_int32_t req32;
913 
914 	config->header.size = sizeof(struct hpt_iop_request_get_config);
915 	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
916 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
917 	config->header.result = IOP_RESULT_PENDING;
918 	config->header.context = 0;
919 
920 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
921 	if (req32 == IOPMU_QUEUE_EMPTY)
922 		return -1;
923 
924 	bus_space_write_region_4(hba->bar0t, hba->bar0h,
925 			req32, (u_int32_t *)config,
926 			sizeof(struct hpt_iop_request_header) >> 2);
927 
928 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
929 		KdPrint(("hptiop: get config send cmd failed"));
930 		return -1;
931 	}
932 
933 	bus_space_read_region_4(hba->bar0t, hba->bar0h,
934 			req32, (u_int32_t *)config,
935 			sizeof(struct hpt_iop_request_get_config) >> 2);
936 
937 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
938 
939 	return 0;
940 }
941 
942 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
943 				struct hpt_iop_request_get_config * config)
944 {
945 	struct hpt_iop_request_get_config *req;
946 
947 	if (!(req = hba->ctlcfg_ptr))
948 		return -1;
949 
950 	req->header.flags = 0;
951 	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
952 	req->header.size = sizeof(struct hpt_iop_request_get_config);
953 	req->header.result = IOP_RESULT_PENDING;
954 	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
955 
956 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
957 		KdPrint(("hptiop: get config send cmd failed"));
958 		return -1;
959 	}
960 
961 	*config = *req;
962 	return 0;
963 }
964 
965 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
966 				struct hpt_iop_request_get_config * config)
967 {
968 	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
969 
970 	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
971 	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
972 		KdPrint(("hptiop: header size %x/%x type %x/%x",
973 			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
974 			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
975 		return -1;
976 	}
977 
978 	config->interface_version = info->interface_version;
979 	config->firmware_version = info->firmware_version;
980 	config->max_requests = info->max_requests;
981 	config->request_size = info->request_size;
982 	config->max_sg_count = info->max_sg_count;
983 	config->data_transfer_length = info->data_transfer_length;
984 	config->alignment_mask = info->alignment_mask;
985 	config->max_devices = info->max_devices;
986 	config->sdram_size = info->sdram_size;
987 
988 	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
989 		 config->max_requests, config->request_size,
990 		 config->data_transfer_length, config->max_devices,
991 		 config->sdram_size));
992 
993 	return 0;
994 }
995 
996 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
997 				struct hpt_iop_request_set_config *config)
998 {
999 	u_int32_t req32;
1000 
1001 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1002 
1003 	if (req32 == IOPMU_QUEUE_EMPTY)
1004 		return -1;
1005 
1006 	config->header.size = sizeof(struct hpt_iop_request_set_config);
1007 	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1008 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1009 	config->header.result = IOP_RESULT_PENDING;
1010 	config->header.context = 0;
1011 
1012 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1013 		(u_int32_t *)config,
1014 		sizeof(struct hpt_iop_request_set_config) >> 2);
1015 
1016 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1017 		KdPrint(("hptiop: set config send cmd failed"));
1018 		return -1;
1019 	}
1020 
1021 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1022 
1023 	return 0;
1024 }
1025 
1026 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1027 				struct hpt_iop_request_set_config *config)
1028 {
1029 	struct hpt_iop_request_set_config *req;
1030 
1031 	if (!(req = hba->ctlcfg_ptr))
1032 		return -1;
1033 
1034 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1035 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1036 		sizeof(struct hpt_iop_request_set_config) -
1037 			sizeof(struct hpt_iop_request_header));
1038 
1039 	req->header.flags = 0;
1040 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1041 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1042 	req->header.result = IOP_RESULT_PENDING;
1043 	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
1044 
1045 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1046 		KdPrint(("hptiop: set config send cmd failed"));
1047 		return -1;
1048 	}
1049 
1050 	return 0;
1051 }
1052 
1053 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1054 				struct hpt_iop_request_set_config *config)
1055 {
1056 	struct hpt_iop_request_set_config *req;
1057 
1058 	if (!(req = hba->ctlcfg_ptr))
1059 		return -1;
1060 
1061 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1062 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1063 		sizeof(struct hpt_iop_request_set_config) -
1064 			sizeof(struct hpt_iop_request_header));
1065 
1066 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1067 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1068 	req->header.result = IOP_RESULT_PENDING;
1069 
1070 	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1071 		KdPrint(("hptiop: set config send cmd failed"));
1072 		return -1;
1073 	}
1074 
1075 	return 0;
1076 }
1077 
1078 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1079 				u_int32_t req32,
1080 				struct hpt_iop_ioctl_param *pParams)
1081 {
1082 	u_int64_t temp64;
1083 	struct hpt_iop_request_ioctl_command req;
1084 
1085 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1086 			(hba->max_request_size -
1087 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1088 		device_printf(hba->pcidev, "request size beyond max value");
1089 		return -1;
1090 	}
1091 
1092 	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1093 		+ pParams->nInBufferSize;
1094 	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1095 	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1096 	req.header.result = IOP_RESULT_PENDING;
1097 	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1098 	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1099 	req.inbuf_size = pParams->nInBufferSize;
1100 	req.outbuf_size = pParams->nOutBufferSize;
1101 	req.bytes_returned = 0;
1102 
1103 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1104 		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
1105 
1106 	hptiop_lock_adapter(hba);
1107 
1108 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
1109 	BUS_SPACE_RD4_ITL(outbound_intstatus);
1110 
1111 	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1112 		offsetof(struct hpt_iop_request_ioctl_command, header.context),
1113 		(u_int32_t *)&temp64, 2);
1114 	while (temp64) {
1115 		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1116 				0, "hptctl", HPT_OSM_TIMEOUT)==0)
1117 			break;
1118 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1119 		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1120 			offsetof(struct hpt_iop_request_ioctl_command,
1121 				header.context),
1122 			(u_int32_t *)&temp64, 2);
1123 	}
1124 
1125 	hptiop_unlock_adapter(hba);
1126 	return 0;
1127 }
1128 
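/*
 * ITL ioctl payloads live in the BAR0 window, so they are staged a byte
 * at a time between user memory and bus space with copyin()/copyout().
 */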
1129 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1130     void *user, int size)
1131 {
1132 	unsigned char byte;
1133 	int i;
1134 
1135 	for (i=0; i<size; i++) {
1136 		if (copyin((u_int8_t *)user + i, &byte, 1))
1137 			return -1;
1138 		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1139 	}
1140 
1141 	return 0;
1142 }
1143 
1144 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1145     void *user, int size)
1146 {
1147 	unsigned char byte;
1148 	int i;
1149 
1150 	for (i=0; i<size; i++) {
1151 		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1152 		if (copyout(&byte, (u_int8_t *)user + i, 1))
1153 			return -1;
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1160 				struct hpt_iop_ioctl_param * pParams)
1161 {
1162 	u_int32_t req32;
1163 	u_int32_t result;
1164 
1165 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1166 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1167 		return EFAULT;
1168 
1169 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1170 	if (req32 == IOPMU_QUEUE_EMPTY)
1171 		return EFAULT;
1172 
1173 	if (pParams->nInBufferSize)
1174 		if (hptiop_bus_space_copyin(hba, req32 +
1175 			offsetof(struct hpt_iop_request_ioctl_command, buf),
1176 			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
1177 			goto invalid;
1178 
1179 	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1180 		goto invalid;
1181 
1182 	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1183 			offsetof(struct hpt_iop_request_ioctl_command,
1184 				header.result));
1185 
1186 	if (result == IOP_RESULT_SUCCESS) {
1187 		if (pParams->nOutBufferSize)
1188 			if (hptiop_bus_space_copyout(hba, req32 +
1189 				offsetof(struct hpt_iop_request_ioctl_command, buf) +
1190 					((pParams->nInBufferSize + 3) & ~3),
1191 				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1192 				goto invalid;
1193 
1194 		if (pParams->lpBytesReturned) {
1195 			if (hptiop_bus_space_copyout(hba, req32 +
1196 				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1197 				(void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
1198 				goto invalid;
1199 		}
1200 
1201 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1202 
1203 		return 0;
1204 	} else{
1205 invalid:
1206 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1207 
1208 		return EFAULT;
1209 	}
1210 }
1211 
1212 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1213 				struct hpt_iop_request_ioctl_command *req,
1214 				struct hpt_iop_ioctl_param *pParams)
1215 {
1216 	u_int64_t req_phy;
1217 	int size = 0;
1218 
1219 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1220 			(hba->max_request_size -
1221 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1222 		device_printf(hba->pcidev, "request size beyond max value");
1223 		return -1;
1224 	}
1225 
1226 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1227 	req->inbuf_size = pParams->nInBufferSize;
1228 	req->outbuf_size = pParams->nOutBufferSize;
1229 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1230 					+ pParams->nInBufferSize;
1231 	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1232 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1233 	req->header.result = IOP_RESULT_PENDING;
1234 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1235 	size = req->header.size >> 8;
1236 	size = size > 3 ? 3 : size;
1237 	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1238 	hptiop_mv_inbound_write(req_phy, hba);
1239 
1240 	BUS_SPACE_RD4_MV0(outbound_intmask);
1241 
1242 	while (hba->config_done == 0) {
1243 		if (hptiop_sleep(hba, req, 0,
1244 			"hptctl", HPT_OSM_TIMEOUT)==0)
1245 			continue;
1246 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1247 	}
1248 	return 0;
1249 }
1250 
1251 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1252 				struct hpt_iop_ioctl_param *pParams)
1253 {
1254 	struct hpt_iop_request_ioctl_command *req;
1255 
1256 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1257 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1258 		return EFAULT;
1259 
1260 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1261 	hba->config_done = 0;
1262 	hptiop_lock_adapter(hba);
1263 	if (pParams->nInBufferSize)
1264 		if (copyin((void *)pParams->lpInBuffer,
1265 				req->buf, pParams->nInBufferSize))
1266 			goto invalid;
1267 	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1268 		goto invalid;
1269 
1270 	if (hba->config_done == 1) {
1271 		if (pParams->nOutBufferSize)
1272 			if (copyout(req->buf +
1273 				((pParams->nInBufferSize + 3) & ~3),
1274 				(void *)pParams->lpOutBuffer,
1275 				pParams->nOutBufferSize))
1276 				goto invalid;
1277 
1278 		if (pParams->lpBytesReturned)
1279 			if (copyout(&req->bytes_returned,
1280 				(void*)pParams->lpBytesReturned,
1281 				sizeof(u_int32_t)))
1282 				goto invalid;
1283 		hptiop_unlock_adapter(hba);
1284 		return 0;
1285 	} else{
1286 invalid:
1287 		hptiop_unlock_adapter(hba);
1288 		return EFAULT;
1289 	}
1290 }
1291 
1292 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1293 				struct hpt_iop_request_ioctl_command *req,
1294 				struct hpt_iop_ioctl_param *pParams)
1295 {
1296 	u_int64_t phy_addr;
1297 	u_int32_t index;
1298 
1299 	phy_addr = hba->ctlcfgcmd_phy;
1300 
1301 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1302 			(hba->max_request_size -
1303 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1304 		device_printf(hba->pcidev, "request size beyond max value");
1305 		return -1;
1306 	}
1307 
1308 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1309 	req->inbuf_size = pParams->nInBufferSize;
1310 	req->outbuf_size = pParams->nOutBufferSize;
1311 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1312 					+ pParams->nInBufferSize;
1313 
1314 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1315 	req->header.result = IOP_RESULT_PENDING;
1316 
1317 	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1318 						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1319 						| IOP_REQUEST_FLAG_ADDR_BITS
1320 						| ((phy_addr >> 16) & 0xffff0000);
1321 	req->header.context = ((phy_addr & 0xffffffff) << 32 )
1322 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1323 
1324 	hba->u.mvfrey.inlist_wptr++;
1325 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1326 
1327 	if (index == hba->u.mvfrey.list_count) {
1328 		index = 0;
1329 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1330 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1331 	}
1332 
1333 	hba->u.mvfrey.inlist[index].addr = phy_addr;
1334 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1335 
1336 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1337 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1338 
1339 	while (hba->config_done == 0) {
1340 		if (hptiop_sleep(hba, req, 0, "hptctl", HPT_OSM_TIMEOUT) == 0)
1341 			continue;
1342 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1343 	}
1344 	return 0;
1345 }
1346 
1347 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1348 				struct hpt_iop_ioctl_param *pParams)
1349 {
1350 	struct hpt_iop_request_ioctl_command *req;
1351 
1352 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1353 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1354 		return EFAULT;
1355 
1356 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1357 	hba->config_done = 0;
1358 	hptiop_lock_adapter(hba);
1359 	if (pParams->nInBufferSize)
1360 		if (copyin((void *)pParams->lpInBuffer,
1361 				req->buf, pParams->nInBufferSize))
1362 			goto invalid;
1363 	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1364 		goto invalid;
1365 
1366 	if (hba->config_done == 1) {
1367 		if (pParams->nOutBufferSize)
1368 			if (copyout(req->buf +
1369 				((pParams->nInBufferSize + 3) & ~3),
1370 				(void *)pParams->lpOutBuffer,
1371 				pParams->nOutBufferSize))
1372 				goto invalid;
1373 
1374 		if (pParams->lpBytesReturned)
1375 			if (copyout(&req->bytes_returned,
1376 				(void*)pParams->lpBytesReturned,
1377 				sizeof(u_int32_t)))
1378 				goto invalid;
1379 		hptiop_unlock_adapter(hba);
1380 		return 0;
1381 	} else{
1382 invalid:
1383 		hptiop_unlock_adapter(hba);
1384 		return EFAULT;
1385 	}
1386 }
1387 
1388 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1389 {
1390 	union ccb           *ccb;
1391 
1392 	if ((ccb = xpt_alloc_ccb()) == NULL)
1393 		return(ENOMEM);
1394 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
1395 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1396 		xpt_free_ccb(&ccb->ccb_h);
1397 		return(EIO);
1398 	}
1399 
1400 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1401 	ccb->ccb_h.func_code = XPT_SCAN_BUS;
1402 	ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb;
1403 	ccb->crcn.flags = CAM_FLAG_NONE;
1404 	xpt_action(ccb);
1405 	return(0);
1406 }
1407 
1408 static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
1409 {
1410 	xpt_free_path(ccb->ccb_h.path);
1411 	xpt_free_ccb(&ccb->ccb_h);
1412 }
1413 
1414 static	bus_dmamap_callback_t	hptiop_map_srb;
1415 static	bus_dmamap_callback_t	hptiop_post_scsi_command;
1416 static	bus_dmamap_callback_t	hptiop_mv_map_ctlcfg;
1417 static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;
1418 
1419 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1420 {
1421 	hba->bar0_rid = 0x10;
1422 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1423 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1424 
1425 	if (hba->bar0_res == NULL) {
1426 		device_printf(hba->pcidev,
1427 			"failed to get iop base address.\n");
1428 		return -1;
1429 	}
1430 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1431 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1432 	hba->u.itl.mu = (struct hpt_iopmu_itl *)
1433 				rman_get_virtual(hba->bar0_res);
1434 
1435 	if (!hba->u.itl.mu) {
1436 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1437 					hba->bar0_rid, hba->bar0_res);
1438 		device_printf(hba->pcidev, "alloc mem res failed\n");
1439 		return -1;
1440 	}
1441 
1442 	return 0;
1443 }
1444 
1445 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1446 {
1447 	hba->bar0_rid = 0x10;
1448 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1449 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1450 
1451 	if (hba->bar0_res == NULL) {
1452 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1453 		return -1;
1454 	}
1455 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1456 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1457 	hba->u.mv.regs = (struct hpt_iopmv_regs *)
1458 				rman_get_virtual(hba->bar0_res);
1459 
1460 	if (!hba->u.mv.regs) {
1461 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1462 					hba->bar0_rid, hba->bar0_res);
1463 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1464 		return -1;
1465 	}
1466 
1467 	hba->bar2_rid = 0x18;
1468 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1469 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1470 
1471 	if (hba->bar2_res == NULL) {
1472 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1473 					hba->bar0_rid, hba->bar0_res);
1474 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1475 		return -1;
1476 	}
1477 
1478 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1479 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1480 	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1481 
1482 	if (!hba->u.mv.mu) {
1483 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1484 					hba->bar0_rid, hba->bar0_res);
1485 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1486 					hba->bar2_rid, hba->bar2_res);
1487 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1488 		return -1;
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1495 {
1496 	hba->bar0_rid = 0x10;
1497 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1498 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1499 
1500 	if (hba->bar0_res == NULL) {
1501 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1502 		return -1;
1503 	}
1504 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1505 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1506 	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1507 				rman_get_virtual(hba->bar0_res);
1508 
1509 	if (!hba->u.mvfrey.config) {
1510 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1511 					hba->bar0_rid, hba->bar0_res);
1512 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1513 		return -1;
1514 	}
1515 
1516 	hba->bar2_rid = 0x18;
1517 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1518 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1519 
1520 	if (hba->bar2_res == NULL) {
1521 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1522 					hba->bar0_rid, hba->bar0_res);
1523 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1524 		return -1;
1525 	}
1526 
1527 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1528 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1529 	hba->u.mvfrey.mu =
1530 					(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1531 
1532 	if (!hba->u.mvfrey.mu) {
1533 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534 					hba->bar0_rid, hba->bar0_res);
1535 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1536 					hba->bar2_rid, hba->bar2_res);
1537 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1538 		return -1;
1539 	}
1540 
1541 	return 0;
1542 }
1543 
1544 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1545 {
1546 	if (hba->bar0_res)
1547 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1548 			hba->bar0_rid, hba->bar0_res);
1549 }
1550 
1551 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1552 {
1553 	if (hba->bar0_res)
1554 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1555 			hba->bar0_rid, hba->bar0_res);
1556 	if (hba->bar2_res)
1557 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1558 			hba->bar2_rid, hba->bar2_res);
1559 }
1560 
1561 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1562 {
1563 	if (hba->bar0_res)
1564 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1565 			hba->bar0_rid, hba->bar0_res);
1566 	if (hba->bar2_res)
1567 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1568 			hba->bar2_rid, hba->bar2_res);
1569 }
1570 
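/*
 * Internal DMA memory for the MV families: a small coherent buffer (the
 * control/configuration request area, plus the inbound/outbound lists
 * on "Frey") is restricted to 32-bit addresses and set up through the
 * usual bus_dma tag/alloc/load sequence, with hptiop_mv_map_ctlcfg or
 * hptiop_mvfrey_map_ctlcfg as the load callback.
 */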
1571 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1572 {
1573 	if (bus_dma_tag_create(hba->parent_dmat,
1574 				1,
1575 				0,
1576 				BUS_SPACE_MAXADDR_32BIT,
1577 				BUS_SPACE_MAXADDR,
1578 				NULL, NULL,
1579 				0x800 - 0x8,
1580 				1,
1581 				BUS_SPACE_MAXSIZE_32BIT,
1582 				BUS_DMA_ALLOCNOW,
1583 				&hba->ctlcfg_dmat)) {
1584 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1585 		return -1;
1586 	}
1587 
1588 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1589 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1590 		&hba->ctlcfg_dmamap) != 0) {
1591 			device_printf(hba->pcidev,
1592 					"bus_dmamem_alloc failed!\n");
1593 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1594 			return -1;
1595 	}
1596 
1597 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1598 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1599 			MVIOP_IOCTLCFG_SIZE,
1600 			hptiop_mv_map_ctlcfg, hba, 0)) {
1601 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1602 		if (hba->ctlcfg_dmat) {
1603 			bus_dmamem_free(hba->ctlcfg_dmat,
1604 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1605 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1606 		}
1607 		return -1;
1608 	}
1609 
1610 	return 0;
1611 }
1612 
1613 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1614 {
1615 	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1616 
1617 	list_count >>= 16;
1618 
1619 	if (list_count == 0) {
1620 		return -1;
1621 	}
1622 
1623 	hba->u.mvfrey.list_count = list_count;
1624 	hba->u.mvfrey.internal_mem_size = 0x800
1625 							+ list_count * sizeof(struct mvfrey_inlist_entry)
1626 							+ list_count * sizeof(struct mvfrey_outlist_entry)
1627 							+ sizeof(int);
1628 	if (bus_dma_tag_create(hba->parent_dmat,
1629 				1,
1630 				0,
1631 				BUS_SPACE_MAXADDR_32BIT,
1632 				BUS_SPACE_MAXADDR,
1633 				NULL, NULL,
1634 				hba->u.mvfrey.internal_mem_size,
1635 				1,
1636 				BUS_SPACE_MAXSIZE_32BIT,
1637 				BUS_DMA_ALLOCNOW,
1638 				&hba->ctlcfg_dmat)) {
1639 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1640 		return -1;
1641 	}
1642 
1643 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1644 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1645 		&hba->ctlcfg_dmamap) != 0) {
1646 			device_printf(hba->pcidev,
1647 					"bus_dmamem_alloc failed!\n");
1648 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1649 			return -1;
1650 	}
1651 
1652 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1653 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1654 			hba->u.mvfrey.internal_mem_size,
1655 			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1656 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1657 		if (hba->ctlcfg_dmat) {
1658 			bus_dmamem_free(hba->ctlcfg_dmat,
1659 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1660 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1661 		}
1662 		return -1;
1663 	}
1664 
1665 	return 0;
1666 }
1667 
1668 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1669 	return 0;
1670 }
1671 
1672 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1673 {
1674 	if (hba->ctlcfg_dmat) {
1675 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1676 		bus_dmamem_free(hba->ctlcfg_dmat,
1677 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1678 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1679 	}
1680 
1681 	return 0;
1682 }
1683 
1684 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1685 {
1686 	if (hba->ctlcfg_dmat) {
1687 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1688 		bus_dmamem_free(hba->ctlcfg_dmat,
1689 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1690 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1691 	}
1692 
1693 	return 0;
1694 }
1695 
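/*
 * After IOPMU_INBOUND_MSG0_RESET_COMM the driver waits roughly 100ms
 * for the controller's MCU, re-programs the physical bases of the
 * inbound/outbound lists and the outbound shadow pointer, and resets
 * the software ring pointers with the toggle bit set.
 */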
1696 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1697 {
1698 	u_int32_t i = 100;
1699 
1700 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1701 		return -1;
1702 
1703 	/* wait 100ms for MCU ready */
1704 	while(i--) {
1705 		DELAY(1000);
1706 	}
1707 
1708 	BUS_SPACE_WRT4_MVFREY2(inbound_base,
1709 							hba->u.mvfrey.inlist_phy & 0xffffffff);
1710 	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1711 							(hba->u.mvfrey.inlist_phy >> 16) >> 16);
1712 
1713 	BUS_SPACE_WRT4_MVFREY2(outbound_base,
1714 							hba->u.mvfrey.outlist_phy & 0xffffffff);
1715 	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1716 							(hba->u.mvfrey.outlist_phy >> 16) >> 16);
1717 
1718 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1719 							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1720 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1721 							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1722 
1723 	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1724 								| CL_POINTER_TOGGLE;
1725 	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1726 								| CL_POINTER_TOGGLE;
1727 	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1728 
1729 	return 0;
1730 }
1731 
1732 /*
1733  * CAM driver interface
1734  */
1735 static device_method_t driver_methods[] = {
1736 	/* Device interface */
1737 	DEVMETHOD(device_probe,     hptiop_probe),
1738 	DEVMETHOD(device_attach,    hptiop_attach),
1739 	DEVMETHOD(device_detach,    hptiop_detach),
1740 	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1741 	DEVMETHOD_END
1742 };
1743 
1744 static struct hptiop_adapter_ops hptiop_itl_ops = {
1745 	.family	           = INTEL_BASED_IOP,
1746 	.iop_wait_ready    = hptiop_wait_ready_itl,
1747 	.internal_memalloc = NULL,
1748 	.internal_memfree  = hptiop_internal_memfree_itl,
1749 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1750 	.release_pci_res   = hptiop_release_pci_res_itl,
1751 	.enable_intr       = hptiop_enable_intr_itl,
1752 	.disable_intr      = hptiop_disable_intr_itl,
1753 	.get_config        = hptiop_get_config_itl,
1754 	.set_config        = hptiop_set_config_itl,
1755 	.iop_intr          = hptiop_intr_itl,
1756 	.post_msg          = hptiop_post_msg_itl,
1757 	.post_req          = hptiop_post_req_itl,
1758 	.do_ioctl          = hptiop_do_ioctl_itl,
1759 	.reset_comm        = NULL,
1760 };
1761 
1762 static struct hptiop_adapter_ops hptiop_mv_ops = {
1763 	.family	           = MV_BASED_IOP,
1764 	.iop_wait_ready    = hptiop_wait_ready_mv,
1765 	.internal_memalloc = hptiop_internal_memalloc_mv,
1766 	.internal_memfree  = hptiop_internal_memfree_mv,
1767 	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1768 	.release_pci_res   = hptiop_release_pci_res_mv,
1769 	.enable_intr       = hptiop_enable_intr_mv,
1770 	.disable_intr      = hptiop_disable_intr_mv,
1771 	.get_config        = hptiop_get_config_mv,
1772 	.set_config        = hptiop_set_config_mv,
1773 	.iop_intr          = hptiop_intr_mv,
1774 	.post_msg          = hptiop_post_msg_mv,
1775 	.post_req          = hptiop_post_req_mv,
1776 	.do_ioctl          = hptiop_do_ioctl_mv,
1777 	.reset_comm        = NULL,
1778 };
1779 
1780 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1781 	.family	           = MVFREY_BASED_IOP,
1782 	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
1783 	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
1784 	.internal_memfree  = hptiop_internal_memfree_mvfrey,
1785 	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1786 	.release_pci_res   = hptiop_release_pci_res_mvfrey,
1787 	.enable_intr       = hptiop_enable_intr_mvfrey,
1788 	.disable_intr      = hptiop_disable_intr_mvfrey,
1789 	.get_config        = hptiop_get_config_mvfrey,
1790 	.set_config        = hptiop_set_config_mvfrey,
1791 	.iop_intr          = hptiop_intr_mvfrey,
1792 	.post_msg          = hptiop_post_msg_mvfrey,
1793 	.post_req          = hptiop_post_req_mvfrey,
1794 	.do_ioctl          = hptiop_do_ioctl_mvfrey,
1795 	.reset_comm        = hptiop_reset_comm_mvfrey,
1796 };
1797 
1798 static driver_t hptiop_pci_driver = {
1799 	driver_name,
1800 	driver_methods,
1801 	sizeof(struct hpt_iop_hba)
1802 };
1803 
1804 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, NULL, NULL);
1805 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1806 MODULE_VERSION(hptiop, 1);
1807 
1808 static int hptiop_probe(device_t dev)
1809 {
1810 	struct hpt_iop_hba *hba;
1811 	u_int32_t id;
1812 	static char buf[256];
1813 	int sas = 0;
1814 	struct hptiop_adapter_ops *ops;
1815 
1816 	if (pci_get_vendor(dev) != 0x1103)
1817 		return (ENXIO);
1818 
1819 	id = pci_get_device(dev);
1820 
1821 	switch (id) {
1822 		case 0x4520:
1823 		case 0x4522:
1824 			sas = 1;
1825 			ops = &hptiop_mvfrey_ops;
1826 			break;
1827 		case 0x4210:
1828 		case 0x4211:
1829 		case 0x4310:
1830 		case 0x4311:
1831 		case 0x4320:
1832 		case 0x4321:
1833 		case 0x4322:
1834 			sas = 1;
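			/* FALLTHROUGH - SAS 4xxx models also use the ITL ops */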
1835 		case 0x3220:
1836 		case 0x3320:
1837 		case 0x3410:
1838 		case 0x3520:
1839 		case 0x3510:
1840 		case 0x3511:
1841 		case 0x3521:
1842 		case 0x3522:
1843 		case 0x3530:
1844 		case 0x3540:
1845 		case 0x3560:
1846 			ops = &hptiop_itl_ops;
1847 			break;
1848 		case 0x3020:
1849 		case 0x3120:
1850 		case 0x3122:
1851 			ops = &hptiop_mv_ops;
1852 			break;
1853 		default:
1854 			return (ENXIO);
1855 	}
1856 
1857 	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1858 		pci_get_bus(dev), pci_get_slot(dev),
1859 		pci_get_function(dev), pci_get_irq(dev));
1860 
1861 	ksprintf(buf, "RocketRAID %x %s Controller",
1862 				id, sas ? "SAS" : "SATA");
1863 	device_set_desc_copy(dev, buf);
1864 
1865 	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1866 	bzero(hba, sizeof(struct hpt_iop_hba));
1867 	hba->ops = ops;
1868 
1869 	KdPrint(("hba->ops=%p\n", hba->ops));
1870 	return 0;
1871 }
1872 
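/*
 * Attach: enable bus mastering, map the PCI resources, wait for the
 * IOP to become ready, create the DMA tags and SRB pool, exchange
 * get/set config requests with the firmware, register the CAM SIM,
 * bus and path, hook up the interrupt and create the ioctl node.
 */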
1873 static int hptiop_attach(device_t dev)
1874 {
1875 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1876 	struct hpt_iop_request_get_config  iop_config;
1877 	struct hpt_iop_request_set_config  set_config;
1878 	int rid = 0;
1879 	struct cam_devq *devq;
1880 	struct ccb_setasync *ccb;
1881 	u_int32_t unit = device_get_unit(dev);
1882 
1883 	device_printf(dev, "RocketRAID 3xxx/4xxx controller driver %s\n",
1884 	    driver_version);
1885 
1886 	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1887 		pci_get_bus(dev), pci_get_slot(dev),
1888 		pci_get_function(dev), hba->ops));
1889 
1890 	pci_enable_busmaster(dev);
1891 	hba->pcidev = dev;
1892 
1893 	if (hba->ops->alloc_pci_res(hba))
1894 		return ENXIO;
1895 
1896 	if (hba->ops->iop_wait_ready(hba, 2000)) {
1897 		device_printf(dev, "adapter is not ready\n");
1898 		goto release_pci_res;
1899 	}
1900 
1901 	lockinit(&hba->lock, "hptioplock", 0, LK_CANRECURSE);
1902 
1903 	if (bus_dma_tag_create(NULL,/* parent */
1904 			1,  /* alignment */
1905 			0, /* boundary */
1906 			BUS_SPACE_MAXADDR,  /* lowaddr */
1907 			BUS_SPACE_MAXADDR,  /* highaddr */
1908 			NULL, NULL,         /* filter, filterarg */
1909 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1910 			BUS_SPACE_UNRESTRICTED, /* nsegments */
1911 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1912 			0,      /* flags */
1913 			&hba->parent_dmat   /* tag */))
1914 	{
1915 		device_printf(dev, "alloc parent_dmat failed\n");
1916 		goto release_pci_res;
1917 	}
1918 
1919 	if (hba->ops->family == MV_BASED_IOP) {
1920 		if (hba->ops->internal_memalloc(hba)) {
1921 			device_printf(dev, "internal memory allocation failed\n");
1922 			goto destroy_parent_tag;
1923 		}
1924 	}
1925 
1926 	if (hba->ops->get_config(hba, &iop_config)) {
1927 		device_printf(dev, "get iop config failed.\n");
1928 		goto get_config_failed;
1929 	}
1930 
1931 	hba->firmware_version = iop_config.firmware_version;
1932 	hba->interface_version = iop_config.interface_version;
1933 	hba->max_requests = iop_config.max_requests;
1934 	hba->max_devices = iop_config.max_devices;
1935 	hba->max_request_size = iop_config.request_size;
1936 	hba->max_sg_count = iop_config.max_sg_count;
1937 
1938 	if (hba->ops->family == MVFREY_BASED_IOP) {
1939 		if (hba->ops->internal_memalloc(hba)) {
1940 			device_printf(dev, "internal memory allocation failed\n");
1941 			goto destroy_parent_tag;
1942 		}
1943 		if (hba->ops->reset_comm(hba)) {
1944 			device_printf(dev, "reset comm failed\n");
1945 			goto get_config_failed;
1946 		}
1947 	}
1948 
1949 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1950 			4,  /* alignment */
1951 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1952 			BUS_SPACE_MAXADDR,  /* lowaddr */
1953 			BUS_SPACE_MAXADDR,  /* highaddr */
1954 			NULL, NULL,         /* filter, filterarg */
1955 			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1956 			hba->max_sg_count,  /* nsegments */
1957 			0x20000,    /* maxsegsize */
1958 			BUS_DMA_ALLOCNOW,       /* flags */
1959 			&hba->io_dmat   /* tag */))
1960 	{
1961 		device_printf(dev, "alloc io_dmat failed\n");
1962 		goto get_config_failed;
1963 	}
1964 
1965 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1966 			1,  /* alignment */
1967 			0, /* boundary */
1968 			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1969 			BUS_SPACE_MAXADDR,  /* highaddr */
1970 			NULL, NULL,         /* filter, filterarg */
1971 			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1972 			1,  /* nsegments */
1973 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1974 			0,      /* flags */
1975 			&hba->srb_dmat  /* tag */))
1976 	{
1977 		device_printf(dev, "alloc srb_dmat failed\n");
1978 		goto destroy_io_dmat;
1979 	}
1980 
1981 	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1982 			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1983 			&hba->srb_dmamap) != 0)
1984 	{
1985 		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1986 		goto destroy_srb_dmat;
1987 	}
1988 
1989 	if (bus_dmamap_load(hba->srb_dmat,
1990 			hba->srb_dmamap, hba->uncached_ptr,
1991 			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1992 			hptiop_map_srb, hba, 0))
1993 	{
1994 		device_printf(dev, "bus_dmamap_load failed!\n");
1995 		goto srb_dmamem_free;
1996 	}
1997 
1998 	if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
1999 		device_printf(dev, "cam_simq_alloc failed\n");
2000 		goto srb_dmamap_unload;
2001 	}
2002 
2003 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2004 			hba, unit, &sim_mplock, hba->max_requests - 1, 1, devq);
2005 	cam_simq_release(devq);
2006 	if (!hba->sim) {
2007 		device_printf(dev, "cam_sim_alloc failed\n");
2008 		goto srb_dmamap_unload;
2009 	}
2010 	if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
2011 	{
2012 		device_printf(dev, "xpt_bus_register failed\n");
2013 		goto free_cam_sim;
2014 	}
2015 
2016 	if (xpt_create_path(&hba->path, /*periph */ NULL,
2017 			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2018 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2019 		device_printf(dev, "xpt_create_path failed\n");
2020 		goto deregister_xpt_bus;
2021 	}
2022 
2023 	bzero(&set_config, sizeof(set_config));
2024 	set_config.iop_id = unit;
2025 	set_config.vbus_id = cam_sim_path(hba->sim);
2026 	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2027 
2028 	if (hba->ops->set_config(hba, &set_config)) {
2029 		device_printf(dev, "set iop config failed.\n");
2030 		goto free_hba_path;
2031 	}
2032 
2033 	ccb = &xpt_alloc_ccb()->csa;
2034 
2035 	xpt_setup_ccb(&ccb->ccb_h, hba->path, /*priority*/5);
2036 	ccb->ccb_h.func_code = XPT_SASYNC_CB;
2037 	ccb->event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2038 	ccb->callback = hptiop_async;
2039 	ccb->callback_arg = hba->sim;
2040 	xpt_action((union ccb *)ccb);
2041 	xpt_free_ccb(&ccb->ccb_h);
2042 
2043 	rid = 0;
2044 	if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
2045 			&rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2046 		device_printf(dev, "allocate irq failed!\n");
2047 		goto free_hba_path;
2048 	}
2049 
2050 	if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
2051 				hptiop_pci_intr, hba, &hba->irq_handle, NULL))
2052 	{
2053 		device_printf(dev, "bus_setup_intr failed!\n");
2054 		goto free_irq_resource;
2055 	}
2056 
2057 	if (hptiop_send_sync_msg(hba,
2058 			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2059 		device_printf(dev, "failed to start background task\n");
2060 		goto teardown_irq_resource;
2061 	}
2062 
2063 	hba->ops->enable_intr(hba);
2064 	hba->initialized = 1;
2065 
2066 	hba->ioctl_dev = make_dev(&hptiop_ops, unit,
2067 				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2068 				S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
2069 
2070 	hba->ioctl_dev->si_drv1 = hba;
2071 
2072 	hptiop_rescan_bus(hba);
2073 
2074 	return 0;
2075 
2076 
2077 teardown_irq_resource:
2078 	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2079 
2080 free_irq_resource:
2081 	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2082 
2083 free_hba_path:
2084 	xpt_free_path(hba->path);
2085 
2086 deregister_xpt_bus:
2087 	xpt_bus_deregister(cam_sim_path(hba->sim));
2088 
2089 free_cam_sim:
2090 	cam_sim_free(hba->sim);
2091 
2092 srb_dmamap_unload:
2093 	if (hba->uncached_ptr)
2094 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2095 
2096 srb_dmamem_free:
2097 	if (hba->uncached_ptr)
2098 		bus_dmamem_free(hba->srb_dmat,
2099 			hba->uncached_ptr, hba->srb_dmamap);
2100 
2101 destroy_srb_dmat:
2102 	if (hba->srb_dmat)
2103 		bus_dma_tag_destroy(hba->srb_dmat);
2104 
2105 destroy_io_dmat:
2106 	if (hba->io_dmat)
2107 		bus_dma_tag_destroy(hba->io_dmat);
2108 
2109 get_config_failed:
2110 	hba->ops->internal_memfree(hba);
2111 
2112 destroy_parent_tag:
2113 	if (hba->parent_dmat)
2114 		bus_dma_tag_destroy(hba->parent_dmat);
2115 
2116 release_pci_res:
2117 	if (hba->ops->release_pci_res)
2118 		hba->ops->release_pci_res(hba);
2119 
2120 	return ENXIO;
2121 }
2122 
2123 static int hptiop_detach(device_t dev)
2124 {
2125 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2126 	int i;
2127 	int error = EBUSY;
2128 
2129 	hptiop_lock_adapter(hba);
2130 	for (i = 0; i < hba->max_devices; i++)
2131 		if (hptiop_os_query_remove_device(hba, i)) {
2132 			device_printf(dev, "file system is busy. id=%d", i);
2133 			goto out;
2134 		}
2135 
2136 	if ((error = hptiop_shutdown(dev)) != 0)
2137 		goto out;
2138 	if (hptiop_send_sync_msg(hba,
2139 		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2140 		goto out;
2141 
2142 	hptiop_release_resource(hba);
2143 	error = 0;
2144 out:
2145 	hptiop_unlock_adapter(hba);
2146 	return error;
2147 }
2148 
2149 static int hptiop_shutdown(device_t dev)
2150 {
2151 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2152 
2153 	int error = 0;
2154 
2155 	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2156 		device_printf(dev, "device is busy");
2157 		return EBUSY;
2158 	}
2159 
2160 	hba->ops->disable_intr(hba);
2161 
2162 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2163 		error = EBUSY;
2164 
2165 	return error;
2166 }
2167 
2168 static void hptiop_pci_intr(void *arg)
2169 {
2170 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2171 	hptiop_lock_adapter(hba);
2172 	hba->ops->iop_intr(hba);
2173 	hptiop_unlock_adapter(hba);
2174 }
2175 
2176 static void hptiop_poll(struct cam_sim *sim)
2177 {
2178 	hptiop_pci_intr(cam_sim_softc(sim));
2179 }
2180 
2181 static void hptiop_async(void * callback_arg, u_int32_t code,
2182 					struct cam_path * path, void * arg)
2183 {
2184 }
2185 
2186 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2187 {
2188 	BUS_SPACE_WRT4_ITL(outbound_intmask,
2189 		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2190 }
2191 
2192 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2193 {
2194 	u_int32_t int_mask;
2195 
2196 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2197 
2198 	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2199 			| MVIOP_MU_OUTBOUND_INT_MSG;
2200 	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2201 }
2202 
2203 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2204 {
2205 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2206 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2207 
2208 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2209 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2210 
2211 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2212 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2213 }
2214 
2215 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2216 {
2217 	u_int32_t int_mask;
2218 
2219 	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2220 
2221 	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2222 	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2223 	BUS_SPACE_RD4_ITL(outbound_intstatus);
2224 }
2225 
2226 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2227 {
2228 	u_int32_t int_mask;
2229 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2230 
2231 	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2232 			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2233 	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2234 	BUS_SPACE_RD4_MV0(outbound_intmask);
2235 }
2236 
2237 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2238 {
2239 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2240 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2241 
2242 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2243 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2244 
2245 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2246 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2247 }
2248 
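/*
 * Reset the IOP and restart its background task.  Used for
 * XPT_RESET_BUS and as the MVFREY command timeout callout.
 */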
2249 static void hptiop_reset_adapter(void *argv)
2250 {
2251 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2252 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2253 		return;
2254 	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2255 }
2256 
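/*
 * SRB free list: a simple LIFO stack of the fixed-size SRBs carved out
 * of the uncached region by hptiop_map_srb().
 */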
2257 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2258 {
2259 	struct hpt_iop_srb * srb;
2260 
2261 	if (hba->srb_list) {
2262 		srb = hba->srb_list;
2263 		hba->srb_list = srb->next;
2264 		return srb;
2265 	}
2266 
2267 	return NULL;
2268 }
2269 
2270 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2271 {
2272 	srb->next = hba->srb_list;
2273 	hba->srb_list = srb;
2274 }
2275 
2276 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2277 {
2278 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2279 	struct hpt_iop_srb * srb;
2280 
2281 	switch (ccb->ccb_h.func_code) {
2282 
2283 	case XPT_SCSI_IO:
2284 		hptiop_lock_adapter(hba);
2285 		if (ccb->ccb_h.target_lun != 0 ||
2286 			ccb->ccb_h.target_id >= hba->max_devices ||
2287 			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2288 		{
2289 			ccb->ccb_h.status = CAM_TID_INVALID;
2290 			xpt_done(ccb);
2291 			goto scsi_done;
2292 		}
2293 
2294 		if ((srb = hptiop_get_srb(hba)) == NULL) {
2295 			device_printf(hba->pcidev, "srb allocation failed");
2296 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2297 			xpt_done(ccb);
2298 			goto scsi_done;
2299 		}
2300 
2301 		srb->ccb = ccb;
2302 
2303 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
2304 			hptiop_post_scsi_command(srb, NULL, 0, 0);
2305 		else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2306 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2307 				int error;
2308 
2309 				error = bus_dmamap_load(hba->io_dmat,
2310 						srb->dma_map,
2311 						ccb->csio.data_ptr,
2312 						ccb->csio.dxfer_len,
2313 						hptiop_post_scsi_command,
2314 						srb, 0);
2315 
2316 				if (error && error != EINPROGRESS) {
2317 					device_printf(hba->pcidev,
2318 					    "bus_dmamap_load error %d", error);
2319 					xpt_freeze_simq(hba->sim, 1);
2320 					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2321 invalid:
2322 					hptiop_free_srb(hba, srb);
2323 					xpt_done(ccb);
2324 					goto scsi_done;
2325 				}
2326 			}
2327 			else {
2328 				device_printf(hba->pcidev,
2329 					"CAM_DATA_PHYS not supported");
2330 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2331 				goto invalid;
2332 			}
2333 		}
2334 		else {
2335 			struct bus_dma_segment *segs;
2336 
2337 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
2338 				(ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2339 				device_printf(hba->pcidev, "SCSI cmd failed");
2340 				ccb->ccb_h.status=CAM_PROVIDE_FAIL;
2341 				goto invalid;
2342 			}
2343 
2344 			segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
2345 			hptiop_post_scsi_command(srb, segs,
2346 						ccb->csio.sglist_cnt, 0);
2347 		}
2348 
2349 scsi_done:
2350 		hptiop_unlock_adapter(hba);
2351 		return;
2352 
2353 	case XPT_RESET_BUS:
2354 		device_printf(hba->pcidev, "reset adapter");
2355 		hptiop_lock_adapter(hba);
2356 		hba->msg_done = 0;
2357 		hptiop_reset_adapter(hba);
2358 		hptiop_unlock_adapter(hba);
2359 		break;
2360 
2361 	case XPT_GET_TRAN_SETTINGS:
2362 	case XPT_SET_TRAN_SETTINGS:
2363 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2364 		break;
2365 
2366 	case XPT_CALC_GEOMETRY:
2367 		cam_calc_geometry(&ccb->ccg, 1);
2368 		break;
2369 
2370 	case XPT_PATH_INQ:
2371 	{
2372 		struct ccb_pathinq *cpi = &ccb->cpi;
2373 
2374 		cpi->version_num = 1;
2375 		cpi->hba_inquiry = PI_SDTR_ABLE;
2376 		cpi->target_sprt = 0;
2377 		cpi->hba_misc = PIM_NOBUSRESET;
2378 		cpi->hba_eng_cnt = 0;
2379 		cpi->max_target = hba->max_devices;
2380 		cpi->max_lun = 0;
2381 		cpi->unit_number = cam_sim_unit(sim);
2382 		cpi->bus_id = cam_sim_bus(sim);
2383 		cpi->initiator_id = hba->max_devices;
2384 		cpi->base_transfer_speed = 3300;
2385 
2386 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2387 		strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2388 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2389 		cpi->transport = XPORT_SPI;
2390 		cpi->transport_version = 2;
2391 		cpi->protocol = PROTO_SCSI;
2392 		cpi->protocol_version = SCSI_REV_2;
2393 		cpi->ccb_h.status = CAM_REQ_CMP;
2394 		break;
2395 	}
2396 
2397 	default:
2398 		ccb->ccb_h.status = CAM_REQ_INVALID;
2399 		break;
2400 	}
2401 
2402 	xpt_done(ccb);
2403 	return;
2404 }
2405 
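/*
 * Post a SCSI command to an ITL (Intel IOP based) controller.  SRBs
 * flagged HPT_SRB_FLAG_HIGH_MEM_ACESS build the request on the stack
 * and copy it into the request slot obtained from the inbound queue;
 * otherwise the request lives in the host SRB and only its encoded
 * physical address is written to the inbound queue.
 */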
2406 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2407 				struct hpt_iop_srb *srb,
2408 				bus_dma_segment_t *segs, int nsegs)
2409 {
2410 	int idx;
2411 	union ccb *ccb = srb->ccb;
2412 	u_int8_t *cdb;
2413 
2414 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2415 		cdb = ccb->csio.cdb_io.cdb_ptr;
2416 	else
2417 		cdb = ccb->csio.cdb_io.cdb_bytes;
2418 
2419 	KdPrint(("ccb=%p %x-%x-%x\n",
2420 		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2421 
2422 	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2423 		u_int32_t iop_req32;
2424 		struct hpt_iop_request_scsi_command req;
2425 
2426 		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2427 
2428 		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2429 			device_printf(hba->pcidev, "invalid req offset\n");
2430 			ccb->ccb_h.status = CAM_BUSY;
2431 			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2432 			hptiop_free_srb(hba, srb);
2433 			xpt_done(ccb);
2434 			return;
2435 		}
2436 
2437 		if (ccb->csio.dxfer_len && nsegs > 0) {
2438 			struct hpt_iopsg *psg = req.sg_list;
2439 			for (idx = 0; idx < nsegs; idx++, psg++) {
2440 				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2441 				psg->size = segs[idx].ds_len;
2442 				psg->eot = 0;
2443 			}
2444 			psg[-1].eot = 1;
2445 		}
2446 
2447 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2448 
2449 		req.header.size =
2450 				offsetof(struct hpt_iop_request_scsi_command, sg_list)
2451 				+ nsegs*sizeof(struct hpt_iopsg);
2452 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2453 		req.header.flags = 0;
2454 		req.header.result = IOP_RESULT_PENDING;
2455 		req.header.context = (u_int64_t)(unsigned long)srb;
2456 		req.dataxfer_length = ccb->csio.dxfer_len;
2457 		req.channel =  0;
2458 		req.target =  ccb->ccb_h.target_id;
2459 		req.lun =  ccb->ccb_h.target_lun;
2460 
2461 		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2462 			(u_int8_t *)&req, req.header.size);
2463 
2464 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2465 			bus_dmamap_sync(hba->io_dmat,
2466 				srb->dma_map, BUS_DMASYNC_PREREAD);
2467 		}
2468 		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2469 			bus_dmamap_sync(hba->io_dmat,
2470 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2471 
2472 		BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
2473 	} else {
2474 		struct hpt_iop_request_scsi_command *req;
2475 
2476 		req = (struct hpt_iop_request_scsi_command *)srb;
2477 		if (ccb->csio.dxfer_len && nsegs > 0) {
2478 			struct hpt_iopsg *psg = req->sg_list;
2479 			for (idx = 0; idx < nsegs; idx++, psg++) {
2480 				psg->pci_address =
2481 					(u_int64_t)segs[idx].ds_addr;
2482 				psg->size = segs[idx].ds_len;
2483 				psg->eot = 0;
2484 			}
2485 			psg[-1].eot = 1;
2486 		}
2487 
2488 		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2489 
2490 		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2491 		req->header.result = IOP_RESULT_PENDING;
2492 		req->dataxfer_length = ccb->csio.dxfer_len;
2493 		req->channel =  0;
2494 		req->target =  ccb->ccb_h.target_id;
2495 		req->lun =  ccb->ccb_h.target_lun;
2496 		req->header.size =
2497 			offsetof(struct hpt_iop_request_scsi_command, sg_list)
2498 			+ nsegs*sizeof(struct hpt_iopsg);
2499 		req->header.context = (u_int64_t)srb->index |
2500 						IOPMU_QUEUE_ADDR_HOST_BIT;
2501 		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2502 
2503 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2504 			bus_dmamap_sync(hba->io_dmat,
2505 				srb->dma_map, BUS_DMASYNC_PREREAD);
2506 		}else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2507 			bus_dmamap_sync(hba->io_dmat,
2508 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2509 		}
2510 
2511 		if (hba->firmware_version > 0x01020000
2512 			|| hba->interface_version > 0x01020000) {
2513 			u_int32_t size_bits;
2514 
2515 			if (req->header.size < 256)
2516 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2517 			else if (req->header.size < 512)
2518 				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2519 			else
2520 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2521 						| IOPMU_QUEUE_ADDR_HOST_BIT;
2522 
2523 			BUS_SPACE_WRT4_ITL(inbound_queue,
2524 				(u_int32_t)srb->phy_addr | size_bits);
2525 		} else
2526 			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2527 				|IOPMU_QUEUE_ADDR_HOST_BIT);
2528 	}
2529 }
2530 
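/*
 * Post a SCSI command to an MV (Marvell IOP based) controller: the
 * request is built in the host SRB and its physical address, with the
 * request size (in 256-byte units, capped at 3) encoded in the low
 * bits, is written to the MV inbound queue.
 */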
2531 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2532 				struct hpt_iop_srb *srb,
2533 				bus_dma_segment_t *segs, int nsegs)
2534 {
2535 	int idx, size;
2536 	union ccb *ccb = srb->ccb;
2537 	u_int8_t *cdb;
2538 	struct hpt_iop_request_scsi_command *req;
2539 	u_int64_t req_phy;
2540 
2541 	req = (struct hpt_iop_request_scsi_command *)srb;
2542 	req_phy = srb->phy_addr;
2543 
2544 	if (ccb->csio.dxfer_len && nsegs > 0) {
2545 		struct hpt_iopsg *psg = req->sg_list;
2546 		for (idx = 0; idx < nsegs; idx++, psg++) {
2547 			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2548 			psg->size = segs[idx].ds_len;
2549 			psg->eot = 0;
2550 		}
2551 		psg[-1].eot = 1;
2552 	}
2553 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2554 		cdb = ccb->csio.cdb_io.cdb_ptr;
2555 	else
2556 		cdb = ccb->csio.cdb_io.cdb_bytes;
2557 
2558 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2559 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2560 	req->header.result = IOP_RESULT_PENDING;
2561 	req->dataxfer_length = ccb->csio.dxfer_len;
2562 	req->channel = 0;
2563 	req->target =  ccb->ccb_h.target_id;
2564 	req->lun =  ccb->ccb_h.target_lun;
2565 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2566 				- sizeof(struct hpt_iopsg)
2567 				+ nsegs * sizeof(struct hpt_iopsg);
2568 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2569 		bus_dmamap_sync(hba->io_dmat,
2570 			srb->dma_map, BUS_DMASYNC_PREREAD);
2571 	}
2572 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2573 		bus_dmamap_sync(hba->io_dmat,
2574 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2575 	req->header.context = (u_int64_t)srb->index
2576 					<< MVIOP_REQUEST_NUMBER_START_BIT
2577 					| MVIOP_CMD_TYPE_SCSI;
2578 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2579 	size = req->header.size >> 8;
2580 	hptiop_mv_inbound_write(req_phy
2581 			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2582 			| (size > 3 ? 3 : size), hba);
2583 }
2584 
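/*
 * Post a SCSI command to an MVFREY controller: the request is built in
 * the host SRB, an inbound list slot is filled with its physical
 * address and length, and the write pointer (with CL_POINTER_TOGGLE
 * flipped on wrap) is published to the controller.  A 20 second
 * callout is armed to reset the adapter if the command never completes.
 */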
2585 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2586 				struct hpt_iop_srb *srb,
2587 				bus_dma_segment_t *segs, int nsegs)
2588 {
2589 	int idx, index;
2590 	union ccb *ccb = srb->ccb;
2591 	u_int8_t *cdb;
2592 	struct hpt_iop_request_scsi_command *req;
2593 	u_int64_t req_phy;
2594 
2595 	req = (struct hpt_iop_request_scsi_command *)srb;
2596 	req_phy = srb->phy_addr;
2597 
2598 	if (ccb->csio.dxfer_len && nsegs > 0) {
2599 		struct hpt_iopsg *psg = req->sg_list;
2600 		for (idx = 0; idx < nsegs; idx++, psg++) {
2601 			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2602 			psg->size = segs[idx].ds_len;
2603 			psg->eot = 0;
2604 		}
2605 		psg[-1].eot = 1;
2606 	}
2607 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2608 		cdb = ccb->csio.cdb_io.cdb_ptr;
2609 	else
2610 		cdb = ccb->csio.cdb_io.cdb_bytes;
2611 
2612 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2613 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2614 	req->header.result = IOP_RESULT_PENDING;
2615 	req->dataxfer_length = ccb->csio.dxfer_len;
2616 	req->channel = 0;
2617 	req->target = ccb->ccb_h.target_id;
2618 	req->lun = ccb->ccb_h.target_lun;
2619 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2620 				- sizeof(struct hpt_iopsg)
2621 				+ nsegs * sizeof(struct hpt_iopsg);
2622 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2623 		bus_dmamap_sync(hba->io_dmat,
2624 			srb->dma_map, BUS_DMASYNC_PREREAD);
2625 	}
2626 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2627 		bus_dmamap_sync(hba->io_dmat,
2628 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2629 
2630 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2631 						| IOP_REQUEST_FLAG_ADDR_BITS
2632 						| ((req_phy >> 16) & 0xffff0000);
2633 	req->header.context = ((req_phy & 0xffffffff) << 32 )
2634 						| srb->index << 4
2635 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2636 
2637 	hba->u.mvfrey.inlist_wptr++;
2638 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2639 
2640 	if (index == hba->u.mvfrey.list_count) {
2641 		index = 0;
2642 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2643 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2644 	}
2645 
2646 	hba->u.mvfrey.inlist[index].addr = req_phy;
2647 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2648 
2649 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2650 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2651 
2652 	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2653 		callout_reset(ccb->ccb_h.timeout_ch, 20 * hz,
2654 			      hptiop_reset_adapter, hba);
2655 	}
2656 }
2657 
2658 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2659 					int nsegs, int error)
2660 {
2661 	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2662 	union ccb *ccb = srb->ccb;
2663 	struct hpt_iop_hba *hba = srb->hba;
2664 
2665 	if (error || nsegs > hba->max_sg_count) {
2666 		KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2667 			ccb->ccb_h.func_code,
2668 			ccb->ccb_h.target_id,
2669 			ccb->ccb_h.target_lun, nsegs));
2670 		ccb->ccb_h.status = CAM_BUSY;
2671 		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2672 		hptiop_free_srb(hba, srb);
2673 		xpt_done(ccb);
2674 		return;
2675 	}
2676 
2677 	hba->ops->post_req(hba, srb, segs, nsegs);
2678 }
2679 
2680 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2681 				int nsegs, int error)
2682 {
2683 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2684 	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2685 				& ~(u_int64_t)0x1F;
2686 	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2687 				& ~0x1F);
2688 }
2689 
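/*
 * DMA load callback for the MVFREY internal area: align the base to 32
 * bytes, then carve it into the 0x800-byte config block, the inbound
 * list, the outbound list and the outbound-list copy pointer, recording
 * the virtual and physical address of each piece.
 */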
2690 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2691 				int nsegs, int error)
2692 {
2693 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2694 	char *p;
2695 	u_int64_t phy;
2696 	u_int32_t list_count = hba->u.mvfrey.list_count;
2697 
2698 	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2699 				& ~(u_int64_t)0x1F;
2700 	p = (char *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2701 				& ~0x1F);
2702 
2703 	hba->ctlcfgcmd_phy = phy;
2704 	hba->ctlcfg_ptr = p;
2705 
2706 	p += 0x800;
2707 	phy += 0x800;
2708 
2709 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2710 	hba->u.mvfrey.inlist_phy = phy;
2711 
2712 	p += list_count * sizeof(struct mvfrey_inlist_entry);
2713 	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2714 
2715 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2716 	hba->u.mvfrey.outlist_phy = phy;
2717 
2718 	p += list_count * sizeof(struct mvfrey_outlist_entry);
2719 	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2720 
2721 	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2722 	hba->u.mvfrey.outlist_cptr_phy = phy;
2723 }
2724 
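/*
 * DMA load callback for the SRB pool: align the uncached region to 32
 * bytes and slice it into HPT_SRB_MAX_QUEUE_SIZE fixed-size SRBs,
 * creating a DMA map for each.  On ITL controllers the physical
 * address is stored in 32-byte units and a high-memory flag is set
 * for addresses the IOP cannot reference directly.
 */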
2725 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2726 				int nsegs, int error)
2727 {
2728 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2729 	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2730 	struct hpt_iop_srb *srb, *tmp_srb;
2731 	int i;
2732 
2733 	if (error || nsegs == 0) {
2734 		device_printf(hba->pcidev, "hptiop_map_srb error");
2735 		return;
2736 	}
2737 
2738 	/* map srb */
2739 	srb = (struct hpt_iop_srb *)
2740 		(((unsigned long)hba->uncached_ptr + 0x1F)
2741 		& ~(unsigned long)0x1F);
2742 
2743 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2744 		tmp_srb = (struct hpt_iop_srb *)
2745 					((char *)srb + i * HPT_SRB_MAX_SIZE);
2746 		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2747 			if (bus_dmamap_create(hba->io_dmat,
2748 						0, &tmp_srb->dma_map)) {
2749 				device_printf(hba->pcidev, "dmamap create failed");
2750 				return;
2751 			}
2752 
2753 			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2754 			tmp_srb->hba = hba;
2755 			tmp_srb->index = i;
2756 			if (hba->ctlcfg_ptr == NULL) {	/* ITL IOP */
2757 				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2758 							(phy_addr >> 5);
2759 				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2760 					tmp_srb->srb_flag =
2761 						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2762 			} else {
2763 				tmp_srb->phy_addr = phy_addr;
2764 			}
2765 
2766 			hptiop_free_srb(hba, tmp_srb);
2767 			hba->srb[i] = tmp_srb;
2768 			phy_addr += HPT_SRB_MAX_SIZE;
2769 		}
2770 		else {
2771 			device_printf(hba->pcidev, "invalid alignment");
2772 			return;
2773 		}
2774 	}
2775 }
2776 
2777 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2778 {
2779 	hba->msg_done = 1;
2780 }
2781 
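/*
 * Check whether a target may be detached: returns -1 if a "da" periph
 * on the target still holds references, 0 otherwise.
 */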
2782 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2783 						int target_id)
2784 {
2785 	struct cam_periph       *periph = NULL;
2786 	struct cam_path         *path;
2787 	int                     status, retval = 0;
2788 
2789 	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2790 
2791 	if (status == CAM_REQ_CMP) {
2792 		if ((periph = cam_periph_find(path, "da")) != NULL) {
2793 			if (periph->refcount >= 1) {
2794 				device_printf(hba->pcidev, "target_id=0x%x,"
2795 				    "refcount=%d", target_id, periph->refcount);
2796 				retval = -1;
2797 			}
2798 		}
2799 		xpt_free_path(path);
2800 	}
2801 	return retval;
2802 }
2803 
2804 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2805 {
2806 	int i;
2807 	if (hba->path) {
2808 		struct ccb_setasync *ccb;
2809 
2810 		ccb = &xpt_alloc_ccb()->csa;
2811 		xpt_setup_ccb(&ccb->ccb_h, hba->path, /*priority*/5);
2812 		ccb->ccb_h.func_code = XPT_SASYNC_CB;
2813 		ccb->event_enable = 0;
2814 		ccb->callback = hptiop_async;
2815 		ccb->callback_arg = hba->sim;
2816 		xpt_action((union ccb *)ccb);
2817 		xpt_free_path(hba->path);
2818 		xpt_free_ccb(&ccb->ccb_h);
2819 	}
2820 
2821 	if (hba->sim) {
2822 		xpt_bus_deregister(cam_sim_path(hba->sim));
2823 		cam_sim_free(hba->sim);
2824 	}
2825 
2826 	if (hba->ctlcfg_dmat) {
2827 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2828 		bus_dmamem_free(hba->ctlcfg_dmat,
2829 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2830 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2831 	}
2832 
2833 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2834 		struct hpt_iop_srb *srb = hba->srb[i];
2835 		if (srb->dma_map)
2836 			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2837 	}
2838 
2839 	if (hba->srb_dmat) {
2840 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2841 		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2842 		bus_dma_tag_destroy(hba->srb_dmat);
2843 	}
2844 
2845 	if (hba->io_dmat)
2846 		bus_dma_tag_destroy(hba->io_dmat);
2847 
2848 	if (hba->parent_dmat)
2849 		bus_dma_tag_destroy(hba->parent_dmat);
2850 
2851 	if (hba->irq_handle)
2852 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2853 
2854 	if (hba->irq_res)
2855 		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2856 					0, hba->irq_res);
2857 
2858 	if (hba->bar0_res)
2859 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2860 					hba->bar0_rid, hba->bar0_res);
2861 	if (hba->bar2_res)
2862 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2863 					hba->bar2_rid, hba->bar2_res);
2864 	if (hba->ioctl_dev)
2865 		destroy_dev(hba->ioctl_dev);
2866 	dev_ops_remove_minor(&hptiop_ops, device_get_unit(hba->pcidev));
2867 }
2868