/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.15 2012/10/25 17:29:11 delphij Exp $
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/device.h>
#include <sys/mplock2.h>

#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/cam_periph.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <dev/raid/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.8";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
				u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
					struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct dev_ops hptiop_ops = {
	{ driver_name, 0, 0 },
	.d_open = hptiop_open,
	.d_close = hptiop_close,
	.d_ioctl = hptiop_ioctl,
};

#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)

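/*
 * Register access helpers.  Each controller family exposes its message
 * unit through one or two PCI BARs; these macros wrap
 * bus_space_read_4()/bus_space_write_4() with the offset of a named
 * register in the corresponding MU structure (ITL: BAR0; MV: BAR0 for
 * the doorbell registers and BAR2 for the MU; MVFrey: BAR2).
 */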
#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

static int hptiop_open(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	if (hba == NULL)
		return ENXIO;
	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
		return EBUSY;
	hba->flag |= HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
	return 0;
}

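/*
 * Character-device ioctl entry point.  HPT_DO_IOCONTROL forwards a
 * management request to the family-specific do_ioctl handler;
 * HPT_SCAN_BUS triggers a CAM bus rescan.  The MP lock is held for
 * the duration of the call.
 */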
static int hptiop_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t data = ap->a_data;
	int ret = EFAULT;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	get_mplock();

	switch (cmd) {
	case HPT_DO_IOCONTROL:
		ret = hba->ops->do_ioctl(hba,
				(struct hpt_iop_ioctl_param *)data);
		break;
	case HPT_SCAN_BUS:
		ret = hptiop_rescan_bus(hba);
		break;
	}

	rel_mplock();

	return ret;
}

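/*
 * Pop one 64-bit entry from the MV outbound queue, a ring in BAR2
 * space indexed by head/tail registers.  Returns 0 when the ring is
 * empty (tail == head).
 */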
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
	u_int64_t p;
	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

	if (outbound_tail != outbound_head) {
		bus_space_read_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv,
				outbound_q[outbound_tail]),
			(u_int32_t *)&p, 2);

		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;

		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
		return p;
	} else
		return 0;
}

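/*
 * Push one 64-bit entry onto the MV inbound queue and ring the
 * inbound doorbell, presumably to notify the IOP of the new entry.
 */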
static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
	u_int32_t head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	bus_space_write_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
			(u_int32_t *)&p, 2);
	BUS_SPACE_WRT4_MV2(inbound_head, head);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
	BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

	BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
	u_int32_t req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = BUS_SPACE_RD4_ITL(inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		DELAY(1000);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		BUS_SPACE_WRT4_ITL(outbound_queue, req);
		BUS_SPACE_RD4_ITL(outbound_intstatus);
		return 0;
	}

	return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba *hba,
							u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

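/*
 * Completion handler for ITL adapters.  "Host" requests carry the SRB
 * index in the queue entry itself; "IOP" requests live in BAR0 memory
 * and are read back through bus-space accesses.  Both paths converge
 * at srb_complete, where the IOP result code is translated into CAM
 * status and the CCB is completed.
 */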
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
							u_int32_t index)
{
	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req = NULL;
	union ccb *ccb;
	u_int8_t *cdb;
	u_int32_t result, temp, dxfer;
	u_int64_t temp64;

	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host request */
		if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000) {
			srb = hba->srb[index & ~(u_int32_t)
				(IOPMU_QUEUE_ADDR_HOST_BIT
				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
			req = (struct hpt_iop_request_scsi_command *)srb;
			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
				result = IOP_RESULT_SUCCESS;
			else
				result = req->header.result;
		} else {
			srb = hba->srb[index &
				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
			req = (struct hpt_iop_request_scsi_command *)srb;
			result = req->header.result;
		}
		dxfer = req->dataxfer_length;
		goto srb_complete;
	}

	/* iop request */
	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, type));
	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, result));
	switch (temp) {
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
	{
		temp64 = 0;
		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
		break;
	}

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
				index + offsetof(struct hpt_iop_request_scsi_command,
				dataxfer_length));
srb_complete:
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		switch (result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;

		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (dxfer < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    dxfer;
			else
				ccb->csio.sense_resid = 0;
			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
				bus_space_read_region_1(hba->bar0t, hba->bar0h,
					index + offsetof(struct hpt_iop_request_scsi_command,
					sg_list), (u_int8_t *)&ccb->csio.sense_data,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			} else {
				memcpy(&ccb->csio.sense_data, &req->sg_list,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			}
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
			BUS_SPACE_WRT4_ITL(outbound_queue, index);

		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	}
}

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
	u_int32_t req, temp;

	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			temp = bus_space_read_4(hba->bar0t,
					hba->bar0h, req +
					offsetof(struct hpt_iop_request_header,
						flags));
			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				u_int64_t temp64;
				bus_space_read_region_4(hba->bar0t,
					hba->bar0h, req +
					offsetof(struct hpt_iop_request_header,
						context),
					(u_int32_t *)&temp64, 2);
				if (temp64) {
					hptiop_request_callback_itl(hba, req);
				} else {
					temp64 = 1;
					bus_space_write_region_4(hba->bar0t,
						hba->bar0h, req +
						offsetof(struct hpt_iop_request_header,
							context),
						(u_int32_t *)&temp64, 2);
				}
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

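/*
 * ITL interrupt service: acknowledge and dispatch an outbound message
 * if one is pending, then drain the outbound request queue.  Returns
 * nonzero if any work was done.
 */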
static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_ITL(outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
							u_int64_t _tag)
{
	u_int32_t context = (u_int32_t)_tag;

	if (context & MVIOP_CMD_TYPE_SCSI) {
		struct hpt_iop_srb *srb;
		struct hpt_iop_request_scsi_command *req;
		union ccb *ccb;
		u_int8_t *cdb;

		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
		req = (struct hpt_iop_request_scsi_command *)srb;
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup(req);
	} else if (context &
			(MVIOP_CMD_TYPE_SET_CONFIG |
				MVIOP_CMD_TYPE_GET_CONFIG))
		hba->config_done = 1;
	else {
		device_printf(hba->pcidev, "wrong callback type\n");
	}
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
				u_int32_t _tag)
{
	u_int32_t req_type = _tag & 0xf;

	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req;
	union ccb *ccb;
	u_int8_t *cdb;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->config_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		srb = hba->srb[(_tag >> 4) & 0xff];
		req = (struct hpt_iop_request_scsi_command *)srb;

		ccb = (union ccb *)srb->ccb;

		callout_stop(ccb->ccb_h.timeout_ch);

		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			       sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
		break;
	default:
		device_printf(hba->pcidev, "wrong callback type\n");
		break;
	}
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
	u_int64_t req;

	while ((req = hptiop_mv_outbound_read(hba))) {
		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
				hptiop_request_callback_mv(hba, req);
			}
		}
	}
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_MV0(outbound_doorbell);

	if (status)
		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_mv(hba);
		ret = 1;
	}

	return ret;
}

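/*
 * MVFrey interrupt service.  On an initialized adapter the PCIe
 * function interrupt is masked while the handler runs.  Doorbell bits
 * carry firmware messages; isr_cause signals new entries in the
 * outbound completion list, which is consumed up to the shadow
 * copy-pointer the controller maintains in host memory.
 */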
static int hptiop_intr_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t status, _tag, cptr;
	int ret = 0;

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
	}

	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
			hptiop_os_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			while (hba->u.mvfrey.outlist_rptr != cptr) {
				hba->u.mvfrey.outlist_rptr++;
				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
					hba->u.mvfrey.outlist_rptr = 0;
				}

				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 2;
			}
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
	}

	return ret;
}

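/*
 * Post a request to the ITL inbound queue and poll, driving the
 * interrupt handler by hand, until the firmware clears the request
 * context or the timeout expires.
 */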
static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
					u_int32_t req32, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t temp64;

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_itl(hba);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		if (temp64)
			return 0;
		DELAY(1000);
	}

	return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t phy_addr;

	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy |
			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
	((struct hpt_iop_request_get_config *)req)->header.flags |=
		IOP_REQUEST_FLAG_SYNC_REQUEST |
		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	hptiop_mv_inbound_write(phy_addr, hba);
	BUS_SPACE_RD4_MV0(outbound_intmask);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mv(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

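/*
 * Post a synchronous request on an MVFrey adapter: fill the next
 * inbound-list slot with the request's physical address and length,
 * publish the new write pointer (with its toggle bit) to the
 * controller, then poll for completion.
 */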
static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i, index;
	u_int64_t phy_addr;
	struct hpt_iop_request_header *reqhdr =
	    (struct hpt_iop_request_header *)req;

	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy;
	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
					| IOP_REQUEST_FLAG_ADDR_BITS
					| ((phy_addr >> 16) & 0xffff0000);
	reqhdr->context = ((phy_addr & 0xffffffff) << 32)
					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mvfrey(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

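/*
 * Send an inbound message to the IOP and poll up to "millisec"
 * milliseconds for the firmware to acknowledge it through the message
 * callback.
 */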
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
					u_int32_t msg, u_int32_t millisec)
{
	u_int32_t i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		hba->ops->iop_intr(hba);
		if (hba->msg_done)
			break;
		DELAY(1000);
	}

	return hba->msg_done ? 0 : -1;
}

static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u_int32_t req32;

	config->header.size = sizeof(struct hpt_iop_request_get_config);
	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	bus_space_write_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_header) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	bus_space_read_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_get_config) >> 2);

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_get_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	*config = *req;
	return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
		KdPrint(("hptiop: header size %x/%x type %x/%x",
			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
		return -1;
	}

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
		 config->max_requests, config->request_size,
		 config->data_transfer_length, config->max_devices,
		 config->sdram_size));

	return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u_int32_t req32;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);

	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	config->header.size = sizeof(struct hpt_iop_request_set_config);
	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
		(u_int32_t *)config,
		sizeof(struct hpt_iop_request_set_config) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;

	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

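/*
 * Build an ioctl request directly in ITL BAR0 memory, post it, and
 * sleep until the firmware clears the request context.  A sleep
 * timeout triggers an adapter reset before the context is re-checked.
 */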
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
				u_int32_t req32,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t temp64;
	struct hpt_iop_request_ioctl_command req;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
		+ pParams->nInBufferSize;
	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	req.header.result = IOP_RESULT_PENDING;
	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req.inbuf_size = pParams->nInBufferSize;
	req.outbuf_size = pParams->nOutBufferSize;
	req.bytes_returned = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
		offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

	hptiop_lock_adapter(hba);

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
		offsetof(struct hpt_iop_request_ioctl_command, header.context),
		(u_int32_t *)&temp64, 2);
	while (temp64) {
		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
				0, "hptctl", HPT_OSM_TIMEOUT) == 0)
			break;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.context),
			(u_int32_t *)&temp64, 2);
	}

	hptiop_unlock_adapter(hba);
	return 0;
}

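/*
 * Byte-wise copies between user space and BAR0 device memory, used to
 * move ioctl buffers in and out of ITL requests that are constructed
 * in place on the adapter.
 */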
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
    void *user, int size)
{
	unsigned char byte;
	int i;

	for (i = 0; i < size; i++) {
		if (copyin((u_int8_t *)user + i, &byte, 1))
			return -1;
		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
	}

	return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
    void *user, int size)
{
	unsigned char byte;
	int i;

	for (i = 0; i < size; i++) {
		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
		if (copyout(&byte, (u_int8_t *)user + i, 1))
			return -1;
	}

	return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int32_t req32;
	u_int32_t result;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return EFAULT;

	if (pParams->nInBufferSize)
		if (hptiop_bus_space_copyin(hba, req32 +
			offsetof(struct hpt_iop_request_ioctl_command, buf),
			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
			goto invalid;

	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
		goto invalid;

	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.result));

	if (result == IOP_RESULT_SUCCESS) {
		if (pParams->nOutBufferSize)
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, buf) +
					((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned) {
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
				(void *)pParams->lpBytesReturned, sizeof(unsigned long)))
				goto invalid;
		}

		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return 0;
	} else {
invalid:
		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return EFAULT;
	}
}

static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t req_phy;
	int size = 0;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
					+ pParams->nInBufferSize;
	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	size = req->header.size >> 8;
	size = size > 3 ? 3 : size;
	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
	hptiop_mv_inbound_write(req_phy, hba);

	BUS_SPACE_RD4_MV0(outbound_intmask);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, 0,
			"hptctl", HPT_OSM_TIMEOUT) == 0)
			continue;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void *)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else {
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t phy_addr;
	u_int32_t index;

	phy_addr = hba->ctlcfgcmd_phy;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
					+ pParams->nInBufferSize;

	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;

	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
						| IOP_REQUEST_FLAG_ADDR_BITS
						| ((phy_addr >> 16) & 0xffff0000);
	req->header.context = ((phy_addr & 0xffffffff) << 32)
						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, 0, "hptctl", HPT_OSM_TIMEOUT) == 0)
			continue;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void *)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else {
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

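/*
 * Kick off an asynchronous full-bus rescan through CAM; the
 * completion callback below frees the path and the CCB.
 */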
static int hptiop_rescan_bus(struct hpt_iop_hba *hba)
{
	union ccb           *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL)
		return(ENOMEM);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(&ccb->ccb_h);
		return(EIO);
	}

	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);
	return(0);
}

static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(&ccb->ccb_h);
}

static	bus_dmamap_callback_t	hptiop_map_srb;
static	bus_dmamap_callback_t	hptiop_post_scsi_command;
static	bus_dmamap_callback_t	hptiop_mv_map_ctlcfg;
static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;

static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev,
			"failed to get iop base address.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.itl.mu = (struct hpt_iopmu_itl *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.itl.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc mem res failed\n");
		return -1;
	}

	return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mv.regs = (struct hpt_iopmv_regs *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.mv.regs) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mv.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.mvfrey.config) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mvfrey.mu =
		(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mvfrey.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
	if (bus_dma_tag_create(hba->parent_dmat,
				1,
				0,
				BUS_SPACE_MAXADDR_32BIT,
				BUS_SPACE_MAXADDR,
				0x800 - 0x8,
				1,
				BUS_SPACE_MAXSIZE_32BIT,
				BUS_DMA_ALLOCNOW,
				&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		&hba->ctlcfg_dmamap) != 0) {
			device_printf(hba->pcidev,
					"bus_dmamem_alloc failed!\n");
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
			return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			MVIOP_IOCTLCFG_SIZE,
			hptiop_mv_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat) {
			bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
		}
		return -1;
	}

	return 0;
}

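/*
 * MVFrey keeps its request/completion lists in host memory.  The list
 * length is taken from the upper half of inbound_conf_ctl; the single
 * DMA allocation apparently covers a 0x800-byte control area, both
 * lists, and the outbound shadow copy-pointer.
 */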
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);

	list_count >>= 16;

	if (list_count == 0) {
		return -1;
	}

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800
			+ list_count * sizeof(struct mvfrey_inlist_entry)
			+ list_count * sizeof(struct mvfrey_outlist_entry)
			+ sizeof(int);
	if (bus_dma_tag_create(hba->parent_dmat,
				1,
				0,
				BUS_SPACE_MAXADDR_32BIT,
				BUS_SPACE_MAXADDR,
				hba->u.mvfrey.internal_mem_size,
				1,
				BUS_SPACE_MAXSIZE_32BIT,
				BUS_DMA_ALLOCNOW,
				&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		&hba->ctlcfg_dmamap) != 0) {
			device_printf(hba->pcidev,
					"bus_dmamem_alloc failed!\n");
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
			return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			hba->u.mvfrey.internal_mem_size,
			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat) {
			bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
		}
		return -1;
	}

	return 0;
}

static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

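/*
 * Re-establish the MVFrey communication area after a RESET_COMM
 * message: program the inbound/outbound list bases and the outbound
 * shadow pointer, then re-seed the software ring pointers with the
 * toggle bit set.
 */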
static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t i = 100;

	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	while (i--) {
		DELAY(1000);
	}

	BUS_SPACE_WRT4_MVFREY2(inbound_base,
			hba->u.mvfrey.inlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
			(hba->u.mvfrey.inlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_base,
			hba->u.mvfrey.outlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
			(hba->u.mvfrey.outlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
			hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
			(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);

	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
								| CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
								| CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;

	return 0;
}

1730 /*
1731  * CAM driver interface
1732  */
1733 static device_method_t driver_methods[] = {
1734 	/* Device interface */
1735 	DEVMETHOD(device_probe,     hptiop_probe),
1736 	DEVMETHOD(device_attach,    hptiop_attach),
1737 	DEVMETHOD(device_detach,    hptiop_detach),
1738 	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1739 	DEVMETHOD_END
1740 };
1741 
1742 static struct hptiop_adapter_ops hptiop_itl_ops = {
1743 	.family	           = INTEL_BASED_IOP,
1744 	.iop_wait_ready    = hptiop_wait_ready_itl,
	.internal_memalloc = NULL,
1746 	.internal_memfree  = hptiop_internal_memfree_itl,
1747 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1748 	.release_pci_res   = hptiop_release_pci_res_itl,
1749 	.enable_intr       = hptiop_enable_intr_itl,
1750 	.disable_intr      = hptiop_disable_intr_itl,
1751 	.get_config        = hptiop_get_config_itl,
1752 	.set_config        = hptiop_set_config_itl,
1753 	.iop_intr          = hptiop_intr_itl,
1754 	.post_msg          = hptiop_post_msg_itl,
1755 	.post_req          = hptiop_post_req_itl,
1756 	.do_ioctl          = hptiop_do_ioctl_itl,
	.reset_comm        = NULL,
1758 };
1759 
1760 static struct hptiop_adapter_ops hptiop_mv_ops = {
1761 	.family	           = MV_BASED_IOP,
1762 	.iop_wait_ready    = hptiop_wait_ready_mv,
1763 	.internal_memalloc = hptiop_internal_memalloc_mv,
1764 	.internal_memfree  = hptiop_internal_memfree_mv,
1765 	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1766 	.release_pci_res   = hptiop_release_pci_res_mv,
1767 	.enable_intr       = hptiop_enable_intr_mv,
1768 	.disable_intr      = hptiop_disable_intr_mv,
1769 	.get_config        = hptiop_get_config_mv,
1770 	.set_config        = hptiop_set_config_mv,
1771 	.iop_intr          = hptiop_intr_mv,
1772 	.post_msg          = hptiop_post_msg_mv,
1773 	.post_req          = hptiop_post_req_mv,
1774 	.do_ioctl          = hptiop_do_ioctl_mv,
	.reset_comm        = NULL,
1776 };
1777 
1778 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1779 	.family	           = MVFREY_BASED_IOP,
1780 	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
1781 	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
1782 	.internal_memfree  = hptiop_internal_memfree_mvfrey,
1783 	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1784 	.release_pci_res   = hptiop_release_pci_res_mvfrey,
1785 	.enable_intr       = hptiop_enable_intr_mvfrey,
1786 	.disable_intr      = hptiop_disable_intr_mvfrey,
1787 	.get_config        = hptiop_get_config_mvfrey,
1788 	.set_config        = hptiop_set_config_mvfrey,
1789 	.iop_intr          = hptiop_intr_mvfrey,
1790 	.post_msg          = hptiop_post_msg_mvfrey,
1791 	.post_req          = hptiop_post_req_mvfrey,
1792 	.do_ioctl          = hptiop_do_ioctl_mvfrey,
1793 	.reset_comm        = hptiop_reset_comm_mvfrey,
1794 };
1795 
1796 static driver_t hptiop_pci_driver = {
1797 	driver_name,
1798 	driver_methods,
1799 	sizeof(struct hpt_iop_hba)
1800 };
1801 
1802 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, NULL, NULL);
1803 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1804 MODULE_VERSION(hptiop, 1);
1805 
1806 static int hptiop_probe(device_t dev)
1807 {
1808 	struct hpt_iop_hba *hba;
1809 	u_int32_t id;
1810 	static char buf[256];
1811 	int sas = 0;
1812 	struct hptiop_adapter_ops *ops;
1813 
1814 	if (pci_get_vendor(dev) != 0x1103)
1815 		return (ENXIO);
1816 
1817 	id = pci_get_device(dev);
1818 
1819 	switch (id) {
1820 		case 0x4520:
1821 		case 0x4522:
1822 			sas = 1;
1823 			ops = &hptiop_mvfrey_ops;
1824 			break;
1825 		case 0x4210:
1826 		case 0x4211:
1827 		case 0x4310:
1828 		case 0x4311:
1829 		case 0x4320:
1830 		case 0x4321:
1831 		case 0x4322:
1832 			sas = 1;
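			/* FALLTHROUGH - these SAS models also use the Intel-based IOP ops */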
1833 		case 0x3220:
1834 		case 0x3320:
1835 		case 0x3410:
1836 		case 0x3520:
1837 		case 0x3510:
1838 		case 0x3511:
1839 		case 0x3521:
1840 		case 0x3522:
1841 		case 0x3530:
1842 		case 0x3540:
1843 		case 0x3560:
1844 			ops = &hptiop_itl_ops;
1845 			break;
1846 		case 0x3020:
1847 		case 0x3120:
1848 		case 0x3122:
1849 			ops = &hptiop_mv_ops;
1850 			break;
1851 		default:
1852 			return (ENXIO);
1853 	}
1854 
1855 	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1856 		pci_get_bus(dev), pci_get_slot(dev),
1857 		pci_get_function(dev), pci_get_irq(dev));
1858 
	ksnprintf(buf, sizeof(buf), "RocketRAID %x %s Controller",
				id, sas ? "SAS" : "SATA");
1861 	device_set_desc_copy(dev, buf);
1862 
1863 	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1864 	bzero(hba, sizeof(struct hpt_iop_hba));
1865 	hba->ops = ops;
1866 
1867 	KdPrint(("hba->ops=%p\n", hba->ops));
1868 	return 0;
1869 }
1870 
1871 static int hptiop_attach(device_t dev)
1872 {
1873 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1874 	struct hpt_iop_request_get_config  iop_config;
1875 	struct hpt_iop_request_set_config  set_config;
1876 	int rid = 0;
1877 	struct cam_devq *devq;
1878 	struct ccb_setasync *ccb;
1879 	u_int32_t unit = device_get_unit(dev);
1880 
1881 	device_printf(dev, "RocketRAID 3xxx/4xxx controller driver %s\n",
1882 	    driver_version);
1883 
1884 	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1885 		pci_get_bus(dev), pci_get_slot(dev),
1886 		pci_get_function(dev), hba->ops));
1887 
1888 	pci_enable_busmaster(dev);
1889 	hba->pcidev = dev;
1890 
1891 	if (hba->ops->alloc_pci_res(hba))
1892 		return ENXIO;
1893 
1894 	if (hba->ops->iop_wait_ready(hba, 2000)) {
1895 		device_printf(dev, "adapter is not ready\n");
1896 		goto release_pci_res;
1897 	}
1898 
1899 	lockinit(&hba->lock, "hptioplock", 0, LK_CANRECURSE);
1900 
1901 	if (bus_dma_tag_create(NULL,/* parent */
1902 			1,  /* alignment */
1903 			0, /* boundary */
1904 			BUS_SPACE_MAXADDR,  /* lowaddr */
1905 			BUS_SPACE_MAXADDR,  /* highaddr */
1906 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1907 			BUS_SPACE_UNRESTRICTED, /* nsegments */
1908 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1909 			0,      /* flags */
1910 			&hba->parent_dmat   /* tag */))
1911 	{
1912 		device_printf(dev, "alloc parent_dmat failed\n");
1913 		goto release_pci_res;
1914 	}
1915 
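	/*
	 * MV-family IOPs post their get/set_config requests through the
	 * internal DMA buffer, so allocate it before the first get_config.
	 */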
1916 	if (hba->ops->family == MV_BASED_IOP) {
1917 		if (hba->ops->internal_memalloc(hba)) {
1918 			device_printf(dev, "alloc srb_dmat failed\n");
1919 			goto destroy_parent_tag;
1920 		}
1921 	}
1922 
1923 	if (hba->ops->get_config(hba, &iop_config)) {
1924 		device_printf(dev, "get iop config failed.\n");
1925 		goto get_config_failed;
1926 	}
1927 
1928 	hba->firmware_version = iop_config.firmware_version;
1929 	hba->interface_version = iop_config.interface_version;
1930 	hba->max_requests = iop_config.max_requests;
1931 	hba->max_devices = iop_config.max_devices;
1932 	hba->max_request_size = iop_config.request_size;
1933 	hba->max_sg_count = iop_config.max_sg_count;
1934 
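	/*
	 * MVFREY IOPs size their command lists from a register, so the
	 * internal memory is allocated only once the IOP is up; reset_comm
	 * then points the IOP at the freshly allocated lists.
	 */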
1935 	if (hba->ops->family == MVFREY_BASED_IOP) {
1936 		if (hba->ops->internal_memalloc(hba)) {
1937 			device_printf(dev, "alloc srb_dmat failed\n");
1938 			goto destroy_parent_tag;
1939 		}
1940 		if (hba->ops->reset_comm(hba)) {
1941 			device_printf(dev, "reset comm failed\n");
1942 			goto get_config_failed;
1943 		}
1944 	}
1945 
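	/*
	 * An arbitrarily aligned buffer needs at most one S/G segment per
	 * page plus one, hence a maxsize of PAGE_SIZE * (max_sg_count - 1).
	 */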
1946 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1947 			4,  /* alignment */
1948 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1949 			BUS_SPACE_MAXADDR,  /* lowaddr */
1950 			BUS_SPACE_MAXADDR,  /* highaddr */
1951 			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1952 			hba->max_sg_count,  /* nsegments */
1953 			0x20000,    /* maxsegsize */
1954 			BUS_DMA_ALLOCNOW,       /* flags */
1955 			&hba->io_dmat   /* tag */))
1956 	{
1957 		device_printf(dev, "alloc io_dmat failed\n");
1958 		goto get_config_failed;
1959 	}
1960 
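	/*
	 * The extra 0x20 bytes leave room to round the SRB pool up to a
	 * 32-byte boundary in hptiop_map_srb().
	 */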
1961 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1962 			1,  /* alignment */
1963 			0, /* boundary */
1964 			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1965 			BUS_SPACE_MAXADDR,  /* highaddr */
1966 			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1967 			1,  /* nsegments */
1968 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1969 			0,      /* flags */
1970 			&hba->srb_dmat  /* tag */))
1971 	{
1972 		device_printf(dev, "alloc srb_dmat failed\n");
1973 		goto destroy_io_dmat;
1974 	}
1975 
1976 	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1977 			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1978 			&hba->srb_dmamap) != 0)
1979 	{
1980 		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1981 		goto destroy_srb_dmat;
1982 	}
1983 
1984 	if (bus_dmamap_load(hba->srb_dmat,
1985 			hba->srb_dmamap, hba->uncached_ptr,
1986 			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1987 			hptiop_map_srb, hba, 0))
1988 	{
1989 		device_printf(dev, "bus_dmamap_load failed!\n");
1990 		goto srb_dmamem_free;
1991 	}
1992 
	if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
1994 		device_printf(dev, "cam_simq_alloc failed\n");
1995 		goto srb_dmamap_unload;
1996 	}
1997 
1998 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1999 			hba, unit, &sim_mplock, hba->max_requests - 1, 1, devq);
2000 	cam_simq_release(devq);
2001 	if (!hba->sim) {
2002 		device_printf(dev, "cam_sim_alloc failed\n");
2003 		goto srb_dmamap_unload;
2004 	}
2005 	if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
2006 	{
2007 		device_printf(dev, "xpt_bus_register failed\n");
2008 		goto free_cam_sim;
2009 	}
2010 
2011 	if (xpt_create_path(&hba->path, /*periph */ NULL,
2012 			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2013 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2014 		device_printf(dev, "xpt_create_path failed\n");
2015 		goto deregister_xpt_bus;
2016 	}
2017 
2018 	bzero(&set_config, sizeof(set_config));
2019 	set_config.iop_id = unit;
2020 	set_config.vbus_id = cam_sim_path(hba->sim);
2021 	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2022 
2023 	if (hba->ops->set_config(hba, &set_config)) {
2024 		device_printf(dev, "set iop config failed.\n");
2025 		goto free_hba_path;
2026 	}
2027 
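	/* Register for device found/lost async notifications. */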
2028 	ccb = &xpt_alloc_ccb()->csa;
2029 
2030 	xpt_setup_ccb(&ccb->ccb_h, hba->path, /*priority*/5);
2031 	ccb->ccb_h.func_code = XPT_SASYNC_CB;
2032 	ccb->event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2033 	ccb->callback = hptiop_async;
2034 	ccb->callback_arg = hba->sim;
2035 	xpt_action((union ccb *)ccb);
2036 	xpt_free_ccb(&ccb->ccb_h);
2037 
2038 	rid = 0;
2039 	if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
2040 			&rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2041 		device_printf(dev, "allocate irq failed!\n");
2042 		goto free_hba_path;
2043 	}
2044 
2045 	if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
2046 				hptiop_pci_intr, hba, &hba->irq_handle, NULL))
2047 	{
2048 		device_printf(dev, "allocate intr function failed!\n");
2049 		goto free_irq_resource;
2050 	}
2051 
2052 	if (hptiop_send_sync_msg(hba,
2053 			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2054 		device_printf(dev, "fail to start background task\n");
		goto teardown_irq_resource;
2056 	}
2057 
2058 	hba->ops->enable_intr(hba);
2059 	hba->initialized = 1;
2060 
2061 	hba->ioctl_dev = make_dev(&hptiop_ops, unit,
2062 				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2063 				S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
2064 
2065 	hba->ioctl_dev->si_drv1 = hba;
2066 
2067 	hptiop_rescan_bus(hba);
2068 
2069 	return 0;
2070 
teardown_irq_resource:
2073 	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2074 
2075 free_irq_resource:
2076 	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2077 
2078 free_hba_path:
2079 	xpt_free_path(hba->path);
2080 
2081 deregister_xpt_bus:
2082 	xpt_bus_deregister(cam_sim_path(hba->sim));
2083 
2084 free_cam_sim:
2085 	cam_sim_free(hba->sim);
2086 
2087 srb_dmamap_unload:
2088 	if (hba->uncached_ptr)
2089 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2090 
2091 srb_dmamem_free:
2092 	if (hba->uncached_ptr)
2093 		bus_dmamem_free(hba->srb_dmat,
2094 			hba->uncached_ptr, hba->srb_dmamap);
2095 
2096 destroy_srb_dmat:
2097 	if (hba->srb_dmat)
2098 		bus_dma_tag_destroy(hba->srb_dmat);
2099 
2100 destroy_io_dmat:
2101 	if (hba->io_dmat)
2102 		bus_dma_tag_destroy(hba->io_dmat);
2103 
2104 get_config_failed:
2105 	hba->ops->internal_memfree(hba);
2106 
2107 destroy_parent_tag:
2108 	if (hba->parent_dmat)
2109 		bus_dma_tag_destroy(hba->parent_dmat);
2110 
2111 release_pci_res:
2112 	if (hba->ops->release_pci_res)
2113 		hba->ops->release_pci_res(hba);
2114 
2115 	return ENXIO;
2116 }
2117 
2118 static int hptiop_detach(device_t dev)
2119 {
2120 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2121 	int i;
2122 	int error = EBUSY;
2123 
2124 	hptiop_lock_adapter(hba);
2125 	for (i = 0; i < hba->max_devices; i++)
2126 		if (hptiop_os_query_remove_device(hba, i)) {
			device_printf(dev, "file system is busy. id=%d\n", i);
2128 			goto out;
2129 		}
2130 
2131 	if ((error = hptiop_shutdown(dev)) != 0)
2132 		goto out;
2133 	if (hptiop_send_sync_msg(hba,
2134 		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2135 		goto out;
2136 
2137 	hptiop_release_resource(hba);
2138 	error = 0;
2139 out:
2140 	hptiop_unlock_adapter(hba);
2141 	return error;
2142 }
2143 
2144 static int hptiop_shutdown(device_t dev)
2145 {
2146 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2147 
2148 	int error = 0;
2149 
2150 	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
		device_printf(dev, "device is busy\n");
2152 		return EBUSY;
2153 	}
2154 
2155 	hba->ops->disable_intr(hba);
2156 
2157 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2158 		error = EBUSY;
2159 
2160 	return error;
2161 }
2162 
2163 static void hptiop_pci_intr(void *arg)
2164 {
2165 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2166 	hptiop_lock_adapter(hba);
2167 	hba->ops->iop_intr(hba);
2168 	hptiop_unlock_adapter(hba);
2169 }
2170 
2171 static void hptiop_poll(struct cam_sim *sim)
2172 {
2173 	hptiop_pci_intr(cam_sim_softc(sim));
2174 }
2175 
2176 static void hptiop_async(void * callback_arg, u_int32_t code,
2177 					struct cam_path * path, void * arg)
2178 {
2179 }
2180 
2181 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2182 {
2183 	BUS_SPACE_WRT4_ITL(outbound_intmask,
2184 		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2185 }
2186 
2187 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2188 {
2189 	u_int32_t int_mask;
2190 
2191 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2192 
2193 	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2194 			| MVIOP_MU_OUTBOUND_INT_MSG;
	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2196 }
2197 
2198 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2199 {
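	/* Each enable is read back, presumably to flush the posted write. */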
2200 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2201 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2202 
2203 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2204 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2205 
2206 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2207 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2208 }
2209 
2210 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2211 {
2212 	u_int32_t int_mask;
2213 
2214 	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2215 
2216 	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2217 	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2218 	BUS_SPACE_RD4_ITL(outbound_intstatus);
2219 }
2220 
2221 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2222 {
2223 	u_int32_t int_mask;
2224 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2225 
2226 	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2227 			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2229 	BUS_SPACE_RD4_MV0(outbound_intmask);
2230 }
2231 
2232 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2233 {
2234 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2235 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2236 
2237 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2238 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2239 
2240 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2241 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2242 }
2243 
2244 static void hptiop_reset_adapter(void *argv)
2245 {
2246 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2247 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2248 		return;
2249 	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2250 }
2251 
2252 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2253 {
2254 	struct hpt_iop_srb * srb;
2255 
2256 	if (hba->srb_list) {
2257 		srb = hba->srb_list;
2258 		hba->srb_list = srb->next;
2259 		return srb;
2260 	}
2261 
2262 	return NULL;
2263 }
2264 
2265 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2266 {
2267 	srb->next = hba->srb_list;
2268 	hba->srb_list = srb;
2269 }
2270 
2271 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2272 {
2273 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2274 	struct hpt_iop_srb * srb;
2275 
2276 	switch (ccb->ccb_h.func_code) {
2277 
2278 	case XPT_SCSI_IO:
2279 		hptiop_lock_adapter(hba);
2280 		if (ccb->ccb_h.target_lun != 0 ||
2281 			ccb->ccb_h.target_id >= hba->max_devices ||
2282 			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2283 		{
2284 			ccb->ccb_h.status = CAM_TID_INVALID;
2285 			xpt_done(ccb);
2286 			goto scsi_done;
2287 		}
2288 
2289 		if ((srb = hptiop_get_srb(hba)) == NULL) {
			device_printf(hba->pcidev, "srb allocation failed\n");
2291 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2292 			xpt_done(ccb);
2293 			goto scsi_done;
2294 		}
2295 
2296 		srb->ccb = ccb;
2297 
2298 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
2299 			hptiop_post_scsi_command(srb, NULL, 0, 0);
2300 		else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2301 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2302 				int error;
2303 
2304 				error = bus_dmamap_load(hba->io_dmat,
2305 						srb->dma_map,
2306 						ccb->csio.data_ptr,
2307 						ccb->csio.dxfer_len,
2308 						hptiop_post_scsi_command,
2309 						srb, 0);
2310 
2311 				if (error && error != EINPROGRESS) {
2312 					device_printf(hba->pcidev,
					    "bus_dmamap_load error %d\n", error);
2314 					xpt_freeze_simq(hba->sim, 1);
2315 					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2316 invalid:
2317 					hptiop_free_srb(hba, srb);
2318 					xpt_done(ccb);
2319 					goto scsi_done;
2320 				}
2321 			}
2322 			else {
2323 				device_printf(hba->pcidev,
					"CAM_DATA_PHYS not supported\n");
2325 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2326 				goto invalid;
2327 			}
2328 		}
2329 		else {
2330 			struct bus_dma_segment *segs;
2331 
2332 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
2333 				(ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
				device_printf(hba->pcidev, "SCSI cmd failed\n");
				ccb->ccb_h.status = CAM_PROVIDE_FAIL;
2336 				goto invalid;
2337 			}
2338 
2339 			segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
2340 			hptiop_post_scsi_command(srb, segs,
2341 						ccb->csio.sglist_cnt, 0);
2342 		}
2343 
2344 scsi_done:
2345 		hptiop_unlock_adapter(hba);
2346 		return;
2347 
2348 	case XPT_RESET_BUS:
		device_printf(hba->pcidev, "reset adapter\n");
2350 		hptiop_lock_adapter(hba);
2351 		hba->msg_done = 0;
2352 		hptiop_reset_adapter(hba);
2353 		hptiop_unlock_adapter(hba);
2354 		break;
2355 
2356 	case XPT_GET_TRAN_SETTINGS:
2357 	case XPT_SET_TRAN_SETTINGS:
2358 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2359 		break;
2360 
2361 	case XPT_CALC_GEOMETRY:
2362 		cam_calc_geometry(&ccb->ccg, 1);
2363 		break;
2364 
2365 	case XPT_PATH_INQ:
2366 	{
2367 		struct ccb_pathinq *cpi = &ccb->cpi;
2368 
2369 		cpi->version_num = 1;
2370 		cpi->hba_inquiry = PI_SDTR_ABLE;
2371 		cpi->target_sprt = 0;
2372 		cpi->hba_misc = PIM_NOBUSRESET;
2373 		cpi->hba_eng_cnt = 0;
2374 		cpi->max_target = hba->max_devices;
2375 		cpi->max_lun = 0;
2376 		cpi->unit_number = cam_sim_unit(sim);
2377 		cpi->bus_id = cam_sim_bus(sim);
2378 		cpi->initiator_id = hba->max_devices;
2379 		cpi->base_transfer_speed = 3300;
2380 
2381 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2382 		strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2383 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2384 		cpi->transport = XPORT_SPI;
2385 		cpi->transport_version = 2;
2386 		cpi->protocol = PROTO_SCSI;
2387 		cpi->protocol_version = SCSI_REV_2;
2388 		cpi->ccb_h.status = CAM_REQ_CMP;
2389 		break;
2390 	}
2391 
2392 	default:
2393 		ccb->ccb_h.status = CAM_REQ_INVALID;
2394 		break;
2395 	}
2396 
2397 	xpt_done(ccb);
2398 	return;
2399 }
2400 
2401 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2402 				struct hpt_iop_srb *srb,
2403 				bus_dma_segment_t *segs, int nsegs)
2404 {
2405 	int idx;
2406 	union ccb *ccb = srb->ccb;
2407 	u_int8_t *cdb;
2408 
2409 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2410 		cdb = ccb->csio.cdb_io.cdb_ptr;
2411 	else
2412 		cdb = ccb->csio.cdb_io.cdb_bytes;
2413 
2414 	KdPrint(("ccb=%p %x-%x-%x\n",
2415 		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2416 
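	/*
	 * SRBs flagged HPT_SRB_FLAG_HIGH_MEM_ACESS sit outside the window
	 * the IOP can address directly (see hptiop_map_srb()), so the
	 * request is built on the stack and copied into IOP-local memory
	 * through BAR0 instead of being handed over by physical address.
	 */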
2417 	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2418 		u_int32_t iop_req32;
2419 		struct hpt_iop_request_scsi_command req;
2420 
2421 		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2422 
2423 		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2424 			device_printf(hba->pcidev, "invalid req offset\n");
2425 			ccb->ccb_h.status = CAM_BUSY;
2426 			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2427 			hptiop_free_srb(hba, srb);
2428 			xpt_done(ccb);
2429 			return;
2430 		}
2431 
2432 		if (ccb->csio.dxfer_len && nsegs > 0) {
2433 			struct hpt_iopsg *psg = req.sg_list;
2434 			for (idx = 0; idx < nsegs; idx++, psg++) {
2435 				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2436 				psg->size = segs[idx].ds_len;
2437 				psg->eot = 0;
2438 			}
2439 			psg[-1].eot = 1;
2440 		}
2441 
2442 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2443 
2444 		req.header.size =
2445 				offsetof(struct hpt_iop_request_scsi_command, sg_list)
				+ nsegs * sizeof(struct hpt_iopsg);
2447 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2448 		req.header.flags = 0;
2449 		req.header.result = IOP_RESULT_PENDING;
2450 		req.header.context = (u_int64_t)(unsigned long)srb;
2451 		req.dataxfer_length = ccb->csio.dxfer_len;
		req.channel = 0;
		req.target = ccb->ccb_h.target_id;
		req.lun = ccb->ccb_h.target_lun;
2455 
2456 		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2457 			(u_int8_t *)&req, req.header.size);
2458 
2459 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2460 			bus_dmamap_sync(hba->io_dmat,
2461 				srb->dma_map, BUS_DMASYNC_PREREAD);
2462 		}
2463 		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2464 			bus_dmamap_sync(hba->io_dmat,
2465 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2466 
		BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
2468 	} else {
2469 		struct hpt_iop_request_scsi_command *req;
2470 
2471 		req = (struct hpt_iop_request_scsi_command *)srb;
2472 		if (ccb->csio.dxfer_len && nsegs > 0) {
2473 			struct hpt_iopsg *psg = req->sg_list;
2474 			for (idx = 0; idx < nsegs; idx++, psg++) {
2475 				psg->pci_address =
2476 					(u_int64_t)segs[idx].ds_addr;
2477 				psg->size = segs[idx].ds_len;
2478 				psg->eot = 0;
2479 			}
2480 			psg[-1].eot = 1;
2481 		}
2482 
2483 		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2484 
2485 		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2486 		req->header.result = IOP_RESULT_PENDING;
2487 		req->dataxfer_length = ccb->csio.dxfer_len;
		req->channel = 0;
		req->target = ccb->ccb_h.target_id;
		req->lun = ccb->ccb_h.target_lun;
2491 		req->header.size =
2492 			offsetof(struct hpt_iop_request_scsi_command, sg_list)
			+ nsegs * sizeof(struct hpt_iopsg);
2494 		req->header.context = (u_int64_t)srb->index |
2495 						IOPMU_QUEUE_ADDR_HOST_BIT;
2496 		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2497 
2498 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2499 			bus_dmamap_sync(hba->io_dmat,
2500 				srb->dma_map, BUS_DMASYNC_PREREAD);
		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2502 			bus_dmamap_sync(hba->io_dmat,
2503 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2504 		}
2505 
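		/*
		 * Newer firmware encodes a request-size hint in the low bits
		 * of the inbound queue entry: below 256 bytes, below 512
		 * bytes, or larger.
		 */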
2506 		if (hba->firmware_version > 0x01020000
2507 			|| hba->interface_version > 0x01020000) {
2508 			u_int32_t size_bits;
2509 
2510 			if (req->header.size < 256)
2511 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2512 			else if (req->header.size < 512)
2513 				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2514 			else
2515 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2516 						| IOPMU_QUEUE_ADDR_HOST_BIT;
2517 
2518 			BUS_SPACE_WRT4_ITL(inbound_queue,
2519 				(u_int32_t)srb->phy_addr | size_bits);
2520 		} else
2521 			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2522 				|IOPMU_QUEUE_ADDR_HOST_BIT);
2523 	}
2524 }
2525 
2526 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2527 				struct hpt_iop_srb *srb,
2528 				bus_dma_segment_t *segs, int nsegs)
2529 {
2530 	int idx, size;
2531 	union ccb *ccb = srb->ccb;
2532 	u_int8_t *cdb;
2533 	struct hpt_iop_request_scsi_command *req;
2534 	u_int64_t req_phy;
2535 
2536 	req = (struct hpt_iop_request_scsi_command *)srb;
2537 	req_phy = srb->phy_addr;
2538 
2539 	if (ccb->csio.dxfer_len && nsegs > 0) {
2540 		struct hpt_iopsg *psg = req->sg_list;
2541 		for (idx = 0; idx < nsegs; idx++, psg++) {
2542 			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2543 			psg->size = segs[idx].ds_len;
2544 			psg->eot = 0;
2545 		}
2546 		psg[-1].eot = 1;
2547 	}
2548 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2549 		cdb = ccb->csio.cdb_io.cdb_ptr;
2550 	else
2551 		cdb = ccb->csio.cdb_io.cdb_bytes;
2552 
2553 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2554 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2555 	req->header.result = IOP_RESULT_PENDING;
2556 	req->dataxfer_length = ccb->csio.dxfer_len;
2557 	req->channel = 0;
	req->target = ccb->ccb_h.target_id;
	req->lun = ccb->ccb_h.target_lun;
2560 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2561 				- sizeof(struct hpt_iopsg)
2562 				+ nsegs * sizeof(struct hpt_iopsg);
2563 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2564 		bus_dmamap_sync(hba->io_dmat,
2565 			srb->dma_map, BUS_DMASYNC_PREREAD);
2566 	}
2567 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2568 		bus_dmamap_sync(hba->io_dmat,
2569 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2570 	req->header.context = (u_int64_t)srb->index
2571 					<< MVIOP_REQUEST_NUMBER_START_BIT
2572 					| MVIOP_CMD_TYPE_SCSI;
2573 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
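	/*
	 * The MV inbound queue entry carries the request size in 256-byte
	 * units, capped at 3, alongside the physical address.
	 */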
2574 	size = req->header.size >> 8;
2575 	hptiop_mv_inbound_write(req_phy
2576 			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2577 			| (size > 3 ? 3 : size), hba);
2578 }
2579 
2580 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2581 				struct hpt_iop_srb *srb,
2582 				bus_dma_segment_t *segs, int nsegs)
2583 {
2584 	int idx, index;
2585 	union ccb *ccb = srb->ccb;
2586 	u_int8_t *cdb;
2587 	struct hpt_iop_request_scsi_command *req;
2588 	u_int64_t req_phy;
2589 
2590 	req = (struct hpt_iop_request_scsi_command *)srb;
2591 	req_phy = srb->phy_addr;
2592 
2593 	if (ccb->csio.dxfer_len && nsegs > 0) {
2594 		struct hpt_iopsg *psg = req->sg_list;
2595 		for (idx = 0; idx < nsegs; idx++, psg++) {
2596 			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2597 			psg->size = segs[idx].ds_len;
2598 			psg->eot = 0;
2599 		}
2600 		psg[-1].eot = 1;
2601 	}
2602 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2603 		cdb = ccb->csio.cdb_io.cdb_ptr;
2604 	else
2605 		cdb = ccb->csio.cdb_io.cdb_bytes;
2606 
2607 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2608 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2609 	req->header.result = IOP_RESULT_PENDING;
2610 	req->dataxfer_length = ccb->csio.dxfer_len;
2611 	req->channel = 0;
2612 	req->target = ccb->ccb_h.target_id;
2613 	req->lun = ccb->ccb_h.target_lun;
2614 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2615 				- sizeof(struct hpt_iopsg)
2616 				+ nsegs * sizeof(struct hpt_iopsg);
2617 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2618 		bus_dmamap_sync(hba->io_dmat,
2619 			srb->dma_map, BUS_DMASYNC_PREREAD);
2620 	}
2621 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2622 		bus_dmamap_sync(hba->io_dmat,
2623 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2624 
2625 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2626 						| IOP_REQUEST_FLAG_ADDR_BITS
2627 						| ((req_phy >> 16) & 0xffff0000);
	req->header.context = ((req_phy & 0xffffffff) << 32)
2629 						| srb->index << 4
2630 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2631 
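	/*
	 * Advance the inbound ring write pointer; on wrap, reset the index
	 * and flip the toggle bit so the IOP can tell fresh entries from
	 * old ones.
	 */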
2632 	hba->u.mvfrey.inlist_wptr++;
2633 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2634 
2635 	if (index == hba->u.mvfrey.list_count) {
2636 		index = 0;
2637 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2638 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2639 	}
2640 
2641 	hba->u.mvfrey.inlist[index].addr = req_phy;
2642 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2643 
2644 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2645 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2646 
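	/*
	 * Arm a 20 second per-command watchdog that resets the adapter if
	 * the request does not complete in time.
	 */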
2647 	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2648 		callout_reset(ccb->ccb_h.timeout_ch, 20 * hz,
2649 			      hptiop_reset_adapter, hba);
2650 	}
2651 }
2652 
2653 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2654 					int nsegs, int error)
2655 {
2656 	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2657 	union ccb *ccb = srb->ccb;
2658 	struct hpt_iop_hba *hba = srb->hba;
2659 
2660 	if (error || nsegs > hba->max_sg_count) {
2661 		KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2662 			ccb->ccb_h.func_code,
2663 			ccb->ccb_h.target_id,
2664 			ccb->ccb_h.target_lun, nsegs));
2665 		ccb->ccb_h.status = CAM_BUSY;
2666 		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2667 		hptiop_free_srb(hba, srb);
2668 		xpt_done(ccb);
2669 		return;
2670 	}
2671 
2672 	hba->ops->post_req(hba, srb, segs, nsegs);
2673 }
2674 
2675 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2676 				int nsegs, int error)
2677 {
2678 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2679 	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2680 				& ~(u_int64_t)0x1F;
2681 	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2682 				& ~0x1F);
2683 }
2684 
2685 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2686 				int nsegs, int error)
2687 {
2688 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2689 	char *p;
2690 	u_int64_t phy;
2691 	u_int32_t list_count = hba->u.mvfrey.list_count;
2692 
2693 	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2694 				& ~(u_int64_t)0x1F;
2695 	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2696 				& ~0x1F);
2697 
2698 	hba->ctlcfgcmd_phy = phy;
2699 	hba->ctlcfg_ptr = p;
2700 
2701 	p += 0x800;
2702 	phy += 0x800;
2703 
2704 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2705 	hba->u.mvfrey.inlist_phy = phy;
2706 
2707 	p += list_count * sizeof(struct mvfrey_inlist_entry);
2708 	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2709 
2710 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2711 	hba->u.mvfrey.outlist_phy = phy;
2712 
2713 	p += list_count * sizeof(struct mvfrey_outlist_entry);
2714 	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2715 
2716 	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2717 	hba->u.mvfrey.outlist_cptr_phy = phy;
2718 }
2719 
2720 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2721 				int nsegs, int error)
2722 {
2723 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2724 	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2725 	struct hpt_iop_srb *srb, *tmp_srb;
2726 	int i;
2727 
2728 	if (error || nsegs == 0) {
		device_printf(hba->pcidev, "hptiop_map_srb error\n");
2730 		return;
2731 	}
2732 
2733 	/* map srb */
2734 	srb = (struct hpt_iop_srb *)
2735 		(((unsigned long)hba->uncached_ptr + 0x1F)
2736 		& ~(unsigned long)0x1F);
2737 
2738 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2739 		tmp_srb = (struct hpt_iop_srb *)
2740 					((char *)srb + i * HPT_SRB_MAX_SIZE);
2741 		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2742 			if (bus_dmamap_create(hba->io_dmat,
2743 						0, &tmp_srb->dma_map)) {
				device_printf(hba->pcidev, "dmamap create failed\n");
2745 				return;
2746 			}
2747 
2748 			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2749 			tmp_srb->hba = hba;
2750 			tmp_srb->index = i;
			if (hba->ctlcfg_ptr == NULL) {	/* ITL IOP */
2752 				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2753 							(phy_addr >> 5);
2754 				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2755 					tmp_srb->srb_flag =
2756 						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2757 			} else {
2758 				tmp_srb->phy_addr = phy_addr;
2759 			}
2760 
2761 			hptiop_free_srb(hba, tmp_srb);
2762 			hba->srb[i] = tmp_srb;
2763 			phy_addr += HPT_SRB_MAX_SIZE;
2764 		}
2765 		else {
			device_printf(hba->pcidev, "invalid alignment\n");
2767 			return;
2768 		}
2769 	}
2770 }
2771 
2772 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2773 {
2774 	hba->msg_done = 1;
2775 }
2776 
2777 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2778 						int target_id)
2779 {
2780 	struct cam_periph       *periph = NULL;
2781 	struct cam_path         *path;
2782 	int                     status, retval = 0;
2783 
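	/*
	 * Refuse removal while the "da" peripheral on this target is still
	 * referenced, e.g. while a file system is mounted on it.
	 */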
2784 	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2785 
2786 	if (status == CAM_REQ_CMP) {
2787 		if ((periph = cam_periph_find(path, "da")) != NULL) {
2788 			if (periph->refcount >= 1) {
				device_printf(hba->pcidev, "target_id=0x%x, "
				    "refcount=%d\n", target_id, periph->refcount);
2791 				retval = -1;
2792 			}
2793 		}
2794 		xpt_free_path(path);
2795 	}
2796 	return retval;
2797 }
2798 
2799 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2800 {
2801 	int i;
2802 	if (hba->path) {
2803 		struct ccb_setasync *ccb;
2804 
2805 		ccb = &xpt_alloc_ccb()->csa;
2806 		xpt_setup_ccb(&ccb->ccb_h, hba->path, /*priority*/5);
2807 		ccb->ccb_h.func_code = XPT_SASYNC_CB;
2808 		ccb->event_enable = 0;
2809 		ccb->callback = hptiop_async;
2810 		ccb->callback_arg = hba->sim;
2811 		xpt_action((union ccb *)ccb);
2812 		xpt_free_path(hba->path);
2813 		xpt_free_ccb(&ccb->ccb_h);
2814 	}
2815 
2816 	if (hba->sim) {
2817 		xpt_bus_deregister(cam_sim_path(hba->sim));
2818 		cam_sim_free(hba->sim);
2819 	}
2820 
2821 	if (hba->ctlcfg_dmat) {
2822 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2823 		bus_dmamem_free(hba->ctlcfg_dmat,
2824 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2825 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2826 	}
2827 
2828 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		struct hpt_iop_srb *srb = hba->srb[i];
		if (srb && srb->dma_map)
2831 			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2832 	}
2833 
2834 	if (hba->srb_dmat) {
2835 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2836 		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2837 		bus_dma_tag_destroy(hba->srb_dmat);
2838 	}
2839 
2840 	if (hba->io_dmat)
2841 		bus_dma_tag_destroy(hba->io_dmat);
2842 
2843 	if (hba->parent_dmat)
2844 		bus_dma_tag_destroy(hba->parent_dmat);
2845 
2846 	if (hba->irq_handle)
2847 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2848 
2849 	if (hba->irq_res)
2850 		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2851 					0, hba->irq_res);
2852 
2853 	if (hba->bar0_res)
2854 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2855 					hba->bar0_rid, hba->bar0_res);
2856 	if (hba->bar2_res)
2857 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2858 					hba->bar2_rid, hba->bar2_res);
2859 	if (hba->ioctl_dev)
2860 		destroy_dev(hba->ioctl_dev);
2861 	dev_ops_remove_minor(&hptiop_ops, device_get_unit(hba->pcidev));
2862 }
2863