xref: /dragonfly/sys/dev/raid/hptrr/hptrr_osm_bsd.c (revision b8c93cad)
1 /*
2  * Copyright (c) HighPoint Technologies, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/hptrr/hptrr_osm_bsd.c,v 1.10 2012/06/17 02:46:27 eadler Exp $
27  */
28 
29 #include <dev/raid/hptrr/hptrr_config.h>
30 /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $
31  *
32  * HighPoint RAID Driver for FreeBSD
33  * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved.
34  */
35 #include <dev/raid/hptrr/os_bsd.h>
36 #include <dev/raid/hptrr/hptintf.h>
37 
38 static int attach_generic = 0;
39 TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic);
40 
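/*
 * PCI probe: walk the global him_list and match this device's PCI vendor and
 * device ID against each HIM's supported-ID table.  On a match, set the
 * device description and pre-initialize the softc as an HBA bound to that HIM.
 */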
41 static int hpt_probe(device_t dev)
42 {
43 	PCI_ID pci_id;
44 	HIM *him;
45 	int i;
46 	PHBA hba;
47 
48 	/* Some of the supported chips are also used by vendors other than HighPoint. */
49 	if (pci_get_vendor(dev) != 0x1103 && !attach_generic)
50 		return (ENXIO);
51 	for (him = him_list; him; him = him->next) {
52 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
53 			if ((pci_get_vendor(dev) == pci_id.vid) &&
54 				(pci_get_device(dev) == pci_id.did)){
55 				KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
56 					pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
57 				));
58 				device_set_desc(dev, him->name);
59 				hba = (PHBA)device_get_softc(dev);
60 				memset(hba, 0, sizeof(HBA));
61 				hba->ext_type = EXT_TYPE_HBA;
62 				hba->ldm_adapter.him = him;
63 				return 0;
64 			}
65 		}
66 	}
67 
68 	return (ENXIO);
69 }
70 
71 static int hpt_attach(device_t dev)
72 {
73 	PHBA hba = (PHBA)device_get_softc(dev);
74 	HIM *him = hba->ldm_adapter.him;
75 	PCI_ID pci_id;
76 	HPT_UINT size;
77 	PVBUS vbus;
78 	PVBUS_EXT vbus_ext;
79 
80 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
81 
82 	pci_enable_busmaster(dev);
83 
84 	pci_id.vid = pci_get_vendor(dev);
85 	pci_id.did = pci_get_device(dev);
86 	pci_id.rev = pci_get_revid(dev);
87 
88 	size = him->get_adapter_size(&pci_id);
89 	hba->ldm_adapter.him_handle = kmalloc(size, M_DEVBUF, M_WAITOK);
90 	if (!hba->ldm_adapter.him_handle)
91 		return ENXIO;
92 
93 	hba->pcidev = dev;
94 	hba->pciaddr.tree = 0;
95 	hba->pciaddr.bus = pci_get_bus(dev);
96 	hba->pciaddr.device = pci_get_slot(dev);
97 	hba->pciaddr.function = pci_get_function(dev);
98 
99 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
100 		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
101 		return -1;
102 	}
103 
104 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
105 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
106 
107 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
108 		size = ldm_get_vbus_size();
109 		vbus_ext = kmalloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
110 		memset(vbus_ext, 0, sizeof(VBUS_EXT));
111 		vbus_ext->ext_type = EXT_TYPE_VBUS;
112 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
113 		ldm_register_adapter(&hba->ldm_adapter);
114 	}
115 
116 	ldm_for_each_vbus(vbus, vbus_ext) {
117 		if (hba->ldm_adapter.vbus==vbus) {
118 			hba->vbus_ext = vbus_ext;
119 			hba->next = vbus_ext->hba_list;
120 			vbus_ext->hba_list = hba;
121 			break;
122 		}
123 	}
124 	return 0;
125 }
126 
127 /*
128  * Maybe we'd better use bus_dmamem_alloc() to allocate DMA memory, but
129  * there are still some problems with that (alignment, etc.); see the sketch after free_pages() below.
130  */
131 static __inline void *__get_free_pages(int order)
132 {
133 	/* don't use low memory - other devices may get starved */
134 	return contigmalloc(PAGE_SIZE<<order,
135 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
136 }
137 
138 static __inline void free_pages(void *p, int order)
139 {
140 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
141 }
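
/*
 * Rough, untested sketch of what a bus_dmamem_alloc()-based replacement for
 * __get_free_pages()/free_pages() might look like.  The tag parameters below
 * are illustrative assumptions only and have not been checked against the
 * alignment requirements mentioned above:
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t map;
 *	void *va;
 *
 *	if (bus_dma_tag_create(NULL, PAGE_SIZE, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    PAGE_SIZE << order, 1, PAGE_SIZE << order,
 *	    BUS_DMA_WAITOK, &tag) == 0 &&
 *	    bus_dmamem_alloc(tag, &va, BUS_DMA_WAITOK, &map) == 0)
 *		return va;
 *
 * The tag and map would also have to be remembered somewhere so the memory
 * could later be released with bus_dmamem_free()/bus_dma_tag_destroy().
 */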
142 
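/*
 * Pre-allocate everything the LDM layer asked for: kmalloc'd blocks for the
 * regular freelists, physically contiguous pages (carved into blocks) for the
 * DMA freelists, and a pool of whole pages for the DMA page cache.
 */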
143 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
144 {
145 	PHBA hba;
146 	struct freelist *f;
147 	HPT_UINT i;
148 	void **p;
149 
150 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
151 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
152 
153 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
154 
155 	for (f=vbus_ext->freelist_head; f; f=f->next) {
156 		KdPrint(("%s: %d*%d=%d bytes",
157 			f->tag, f->count, f->size, f->count*f->size));
158 		for (i=0; i<f->count; i++) {
159 			p = (void **)kmalloc(f->size, M_DEVBUF, M_WAITOK);
160 			if (!p)	return (ENXIO);
161 			*p = f->head;
162 			f->head = p;
163 		}
164 	}
165 
166 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
167 		int order, size, j;
168 
169 		HPT_ASSERT((f->size & (f->alignment-1))==0);
170 
171 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
172 
173 		KdPrint(("%s: %d*%d=%d bytes, order %d",
174 			f->tag, f->count, f->size, f->count*f->size, order));
175 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
176 
177 		for (i=0; i<f->count;) {
178 			p = (void **)__get_free_pages(order);
179 			if (!p) return -1;
180 			for (j = size/f->size; j && i<f->count; i++,j--) {
181 				*p = f->head;
182 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
183 				f->head = p;
184 				p = (void **)((unsigned long)p + f->size);
185 			}
186 		}
187 	}
188 
189 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
190 
191 	for (i=0; i<os_max_cache_pages; i++) {
192 		p = (void **)__get_free_pages(0);
193 		if (!p) return -1;
194 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
195 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
196 	}
197 
198 	return 0;
199 }
200 
201 static void hpt_free_mem(PVBUS_EXT vbus_ext)
202 {
203 	struct freelist *f;
204 	void *p;
205 	int i;
206 	BUS_ADDRESS bus;
207 
208 	for (f=vbus_ext->freelist_head; f; f=f->next) {
209 #if DBG
210 		if (f->count!=f->reserved_count) {
211 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
212 		}
213 #endif
214 		while ((p=freelist_get(f)))
215 			kfree(p, M_DEVBUF);
216 	}
217 
218 	for (i=0; i<os_max_cache_pages; i++) {
219 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
220 		HPT_ASSERT(p);
221 		free_pages(p, 0);
222 	}
223 
224 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
225 		int order, size;
226 #if DBG
227 		if (f->count!=f->reserved_count) {
228 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
229 		}
230 #endif
231 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
232 
233 		while ((p=freelist_get_dma(f, &bus))) {
234 			if (order)
235 				free_pages(p, order);
236 			else {
237 			/* can't free immediately since other blocks in this page may still be in the list */
238 				/* can't free immediately since other blocks in this page may still be in the list */
239 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
240 			}
241 		}
242 	}
243 
244 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
245 		free_pages(p, 0);
246 }
247 
248 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
249 {
250 	PHBA hba;
251 
252 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
253 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
254 			KdPrint(("failed to initialize %p", hba));
255 			return -1;
256 		}
257 
258 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
259 	return 0;
260 }
261 
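/*
 * Completion routine for the internal flush command.  If the array is being
 * transformed, re-queue the flush to the transform target first; otherwise
 * set the caller's "done" flag and wake it up.
 */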
262 static void hpt_flush_done(PCOMMAND pCmd)
263 {
264 	PVDEV vd = pCmd->target;
265 
266 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
267 		vd = vd->u.array.transform->target;
268 		HPT_ASSERT(vd);
269 		pCmd->target = vd;
270 		pCmd->Result = RETURN_PENDING;
271 		vdev_queue_cmd(pCmd);
272 		return;
273 	}
274 
275 	*(int *)pCmd->priv = 1;
276 	wakeup(pCmd);
277 }
278 
279 /*
280  * flush a vdev (without retry).
281  */
282 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
283 {
284 	PCOMMAND pCmd;
285 	int result = 0, done;
286 	HPT_UINT count;
287 
288 	KdPrint(("flushing dev %p", vd));
289 
290 	hpt_lock_vbus(vbus_ext);
291 
292 	if (mIsArray(vd->type) && vd->u.array.transform)
293 		count = MAX(vd->u.array.transform->source->cmds_per_request,
294 					vd->u.array.transform->target->cmds_per_request);
295 	else
296 		count = vd->cmds_per_request;
297 
298 	pCmd = ldm_alloc_cmds(vd->vbus, count);
299 
300 	if (!pCmd) {
301 		hpt_unlock_vbus(vbus_ext);
302 		return -1;
303 	}
304 
305 	pCmd->type = CMD_TYPE_FLUSH;
306 	pCmd->flags.hard_flush = 1;
307 	pCmd->target = vd;
308 	pCmd->done = hpt_flush_done;
309 	done = 0;
310 	pCmd->priv = &done;
311 
312 	ldm_queue_cmd(pCmd);
313 
314 	if (!done) {
315 		while (hpt_sleep(vbus_ext, pCmd, 0, "hptfls", HPT_OSM_TIMEOUT)) {
316 			ldm_reset_vbus(vd->vbus);
317 		}
318 	}
319 
320 	KdPrint(("flush result %d", pCmd->Result));
321 
322 	if (pCmd->Result!=RETURN_SUCCESS)
323 		result = -1;
324 
325 	ldm_free_cmds(pCmd);
326 
327 	hpt_unlock_vbus(vbus_ext);
328 
329 	return result;
330 }
331 
332 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
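
/*
 * Shutdown handler for one virtual bus: stop background array tasks, flush
 * every target (retrying once), shut down and release the LDM vbus, tear
 * down the PCI interrupt handlers and free all per-vbus memory.
 */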
333 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
334 {
335 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
336 	PHBA hba;
337 	int i;
338 
339 	KdPrint(("hpt_shutdown_vbus"));
340 
341 	/* stop all ctl tasks and disable the worker taskqueue */
342 	hpt_stop_tasks(vbus_ext);
343 	vbus_ext->worker.ta_context = NULL;
344 
345 	/* flush devices */
346 	for (i=0; i<osm_max_targets; i++) {
347 		PVDEV vd = ldm_find_target(vbus, i);
348 		if (vd) {
349 			/* retry once */
350 			if (hpt_flush_vdev(vbus_ext, vd))
351 				hpt_flush_vdev(vbus_ext, vd);
352 		}
353 	}
354 
355 	hpt_lock_vbus(vbus_ext);
356 	ldm_shutdown(vbus);
357 	hpt_unlock_vbus(vbus_ext);
358 
359 	ldm_release_vbus(vbus);
360 
361 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
362 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
363 
364 	hpt_free_mem(vbus_ext);
365 
366 	while ((hba=vbus_ext->hba_list)) {
367 		vbus_ext->hba_list = hba->next;
368 		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
369 	}
370 
371 	kfree(vbus_ext, M_DEVBUF);
372 	KdPrint(("hpt_shutdown_vbus done"));
373 }
374 
375 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
376 {
377 	OSM_TASK *tasks;
378 
379 	tasks = vbus_ext->tasks;
380 	vbus_ext->tasks = NULL;
381 
382 	while (tasks) {
383 		OSM_TASK *t = tasks;
384 		tasks = t->next;
385 		t->next = NULL;
386 		t->func(vbus_ext->vbus, t->data);
387 	}
388 }
389 
390 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
391 {
392 	if(vbus_ext){
393 		hpt_lock_vbus(vbus_ext);
394 		__hpt_do_tasks(vbus_ext);
395 		hpt_unlock_vbus(vbus_ext);
396 	}
397 }
398 
399 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
400 static void hpt_poll(struct cam_sim *sim);
401 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
402 static void hpt_pci_intr(void *arg);
403 
404 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
405 {
406 	POS_CMDEXT p = vbus_ext->cmdext_list;
407 	if (p)
408 		vbus_ext->cmdext_list = p->next;
409 	return p;
410 }
411 
412 static __inline void cmdext_put(POS_CMDEXT p)
413 {
414 	p->next = p->vbus_ext->cmdext_list;
415 	p->vbus_ext->cmdext_list = p;
416 }
417 
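/*
 * Per-command timeout handler.  No attempt is made to abort the individual
 * command; the whole virtual bus is simply reset and outstanding commands
 * are then completed through their normal done routines (os_cmddone).
 */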
418 static void hpt_timeout(void *arg)
419 {
420 	PCOMMAND pCmd = (PCOMMAND)arg;
421 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
422 
423 	KdPrint(("pCmd %p timeout", pCmd));
424 
425 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
426 }
427 
428 static void os_cmddone(PCOMMAND pCmd)
429 {
430 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
431 	union ccb *ccb = ext->ccb;
432 
433 	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));
434 
435 	callout_stop(&ccb->ccb_h.timeout_ch);
436 
437 	switch(pCmd->Result) {
438 	case RETURN_SUCCESS:
439 		ccb->ccb_h.status = CAM_REQ_CMP;
440 		break;
441 	case RETURN_BAD_DEVICE:
442 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
443 		break;
444 	case RETURN_DEVICE_BUSY:
445 		ccb->ccb_h.status = CAM_BUSY;
446 		break;
447 	case RETURN_INVALID_REQUEST:
448 		ccb->ccb_h.status = CAM_REQ_INVALID;
449 		break;
450 	case RETURN_SELECTION_TIMEOUT:
451 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
452 		break;
453 	case RETURN_RETRY:
454 		ccb->ccb_h.status = CAM_BUSY;
455 		break;
456 	default:
457 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
458 		break;
459 	}
460 
461 	if (pCmd->flags.data_in) {
462 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
463 	}
464 	else if (pCmd->flags.data_out) {
465 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
466 	}
467 
468 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
469 
470 	cmdext_put(ext);
471 	ldm_free_cmds(pCmd);
472 	xpt_done(ccb);
473 }
474 
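/*
 * Build a logical scatter/gather list for a command from its CCB.  Physical
 * SG lists are always supplied by this driver itself (directly or via the
 * busdma callback), so the physical branch should never be reached.
 */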
475 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
476 {
477 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
478 	union ccb *ccb = ext->ccb;
479 	bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
480 	int idx;
481 
482 	if(logical)	{
483 		if (ccb->ccb_h.flags & CAM_DATA_PHYS)
484 			panic("physical address unsupported");
485 
486 		if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
487 			if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
488 				panic("physical address unsupported");
489 
490 			for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
491 				os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr);
492 				pSg[idx].size = sgList[idx].ds_len;
493 				pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
494 			}
495 		}
496 		else {
497 			os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
498 			pSg->size = ccb->csio.dxfer_len;
499 			pSg->eot = 1;
500 		}
501 		return TRUE;
502 	}
503 
504 	/* since we always supply the physical SG list ourselves, nobody should ask us to build one here */
505 	HPT_ASSERT(0);
506 	return FALSE;
507 }
508 
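/*
 * busdma load callback: translate the returned segments into the command's
 * physical SG list, pre-sync the DMA map, arm the timeout and queue the
 * command to the LDM layer.
 */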
509 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
510 {
511 	PCOMMAND pCmd = (PCOMMAND)arg;
512 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
513 	PSG psg = pCmd->psg;
514 	int idx;
515 
516 	HPT_ASSERT(pCmd->flags.physical_sg);
517 
518 	if (error || nsegs == 0)
519 		panic("busdma error");
520 
521 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
522 
523 	for (idx = 0; idx < nsegs; idx++, psg++) {
524 		psg->addr.bus = segs[idx].ds_addr;
525 		psg->size = segs[idx].ds_len;
526 		psg->eot = 0;
527 	}
528 	psg[-1].eot = 1;
529 
530 	if (pCmd->flags.data_in) {
531 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD);
532 	}
533 	else if (pCmd->flags.data_out) {
534 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE);
535 	}
536 
537 	callout_reset(&ext->ccb->ccb_h.timeout_ch, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
538 	ldm_queue_cmd(pCmd);
539 }
540 
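/*
 * Handle an XPT_SCSI_IO request: emulate a small set of SCSI commands
 * (INQUIRY, READ CAPACITY, etc.) directly and translate READ/WRITE CDBs into
 * the internal IDE-style commands that are queued to the LDM layer.
 */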
541 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
542 {
543 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
544 	PVDEV vd;
545 	PCOMMAND pCmd;
546 	POS_CMDEXT ext;
547 	HPT_U8 *cdb;
548 
549 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
550 		cdb = ccb->csio.cdb_io.cdb_ptr;
551 	else
552 		cdb = ccb->csio.cdb_io.cdb_bytes;
553 
554 	KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
555 		ccb,
556 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
557 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
558 	));
559 
560 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
561 	if (ccb->ccb_h.target_lun != 0 ||
562 		ccb->ccb_h.target_id >= osm_max_targets ||
563 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
564 	{
565 		ccb->ccb_h.status = CAM_TID_INVALID;
566 		xpt_done(ccb);
567 		return;
568 	}
569 
570 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
571 
572 	if (!vd) {
573 		ccb->ccb_h.status = CAM_TID_INVALID;
574 		xpt_done(ccb);
575 		return;
576 	}
577 
578 	switch (cdb[0]) {
579 	case TEST_UNIT_READY:
580 	case START_STOP_UNIT:
581 	case SYNCHRONIZE_CACHE:
582 		ccb->ccb_h.status = CAM_REQ_CMP;
583 		break;
584 
585 	case INQUIRY:
586 		{
587 			PINQUIRYDATA inquiryData;
588 			memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
589 			inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
590 
591 			inquiryData->AdditionalLength = 31;
592 			inquiryData->CommandQueue = 1;
593 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
594 			memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);
595 
596 			if (vd->target_id / 10) {
597 				inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
598 				inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
599 			}
600 			else
601 				inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';
602 
603 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
604 
605 			ccb->ccb_h.status = CAM_REQ_CMP;
606 		}
607 		break;
608 
609 	case READ_CAPACITY:
610 	{
611 		HPT_U8 *rbuf = ccb->csio.data_ptr;
612 		HPT_U32 cap;
613 
614 		if (vd->capacity>0xfffffffful)
615 			cap = 0xfffffffful;
616 		else
617 			cap = vd->capacity - 1;
618 
619 		rbuf[0] = (HPT_U8)(cap>>24);
620 		rbuf[1] = (HPT_U8)(cap>>16);
621 		rbuf[2] = (HPT_U8)(cap>>8);
622 		rbuf[3] = (HPT_U8)cap;
623 		rbuf[4] = 0;
624 		rbuf[5] = 0;
625 		rbuf[6] = 2;
626 		rbuf[7] = 0;
627 
628 		ccb->ccb_h.status = CAM_REQ_CMP;
629 		break;
630 	}
631 
632 	case SERVICE_ACTION_IN:
633 	{
634 		HPT_U8 *rbuf = ccb->csio.data_ptr;
635 		HPT_U64	cap = vd->capacity - 1;
636 
637 		rbuf[0] = (HPT_U8)(cap>>56);
638 		rbuf[1] = (HPT_U8)(cap>>48);
639 		rbuf[2] = (HPT_U8)(cap>>40);
640 		rbuf[3] = (HPT_U8)(cap>>32);
641 		rbuf[4] = (HPT_U8)(cap>>24);
642 		rbuf[5] = (HPT_U8)(cap>>16);
643 		rbuf[6] = (HPT_U8)(cap>>8);
644 		rbuf[7] = (HPT_U8)cap;
645 		rbuf[8] = 0;
646 		rbuf[9] = 0;
647 		rbuf[10] = 2;
648 		rbuf[11] = 0;
649 
650 		ccb->ccb_h.status = CAM_REQ_CMP;
651 		break;
652 	}
653 
654 	case READ_6:
655 	case READ_10:
656 	case READ_16:
657 	case WRITE_6:
658 	case WRITE_10:
659 	case WRITE_16:
660 	case 0x13:
661 	case 0x2f:
662 	{
663 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
664 		if(!pCmd){
665 			KdPrint(("Failed to allocate command!"));
666 			ccb->ccb_h.status = CAM_BUSY;
667 			break;
668 		}
669 
670 		switch (cdb[0])	{
671 		case READ_6:
672 		case WRITE_6:
673 		case 0x13:
674 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
675 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
676 			break;
677 		case READ_16:
678 		case WRITE_16:
679 		{
680 			HPT_U64 block =
681 				((HPT_U64)cdb[2]<<56) |
682 				((HPT_U64)cdb[3]<<48) |
683 				((HPT_U64)cdb[4]<<40) |
684 				((HPT_U64)cdb[5]<<32) |
685 				((HPT_U64)cdb[6]<<24) |
686 				((HPT_U64)cdb[7]<<16) |
687 				((HPT_U64)cdb[8]<<8) |
688 				((HPT_U64)cdb[9]);
689 			pCmd->uCmd.Ide.Lba = block;
690 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
691 			break;
692 		}
693 
694 		default:
695 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
696 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
697 			break;
698 		}
699 
700 		switch (cdb[0]) {
701 		case READ_6:
702 		case READ_10:
703 		case READ_16:
704 			pCmd->flags.data_in = 1;
705 			break;
706 		case WRITE_6:
707 		case WRITE_10:
708 		case WRITE_16:
709 			pCmd->flags.data_out = 1;
710 			break;
711 		}
712 		pCmd->priv = ext = cmdext_get(vbus_ext);
713 		HPT_ASSERT(ext);
714 		ext->ccb = ccb;
715 		pCmd->target = vd;
716 		pCmd->done = os_cmddone;
717 		pCmd->buildsgl = os_buildsgl;
718 
719 		pCmd->psg = ext->psg;
720 
721 		if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
722 			int idx;
723 			bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
724 
725 			if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
726 				pCmd->flags.physical_sg = 1;
727 
728 			for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
729 				pCmd->psg[idx].addr.bus = sgList[idx].ds_addr;
730 				pCmd->psg[idx].size = sgList[idx].ds_len;
731 				pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
732 			}
733 
734 			callout_reset(&ccb->ccb_h.timeout_ch, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
735 			ldm_queue_cmd(pCmd);
736 		}
737 		else {
738 			int error;
739 			pCmd->flags.physical_sg = 1;
740 			error = bus_dmamap_load(vbus_ext->io_dmat,
741 						ext->dma_map,
742 						ccb->csio.data_ptr, ccb->csio.dxfer_len,
743 						hpt_io_dmamap_callback, pCmd,
744 					BUS_DMA_WAITOK
745 					);
746 			KdPrint(("bus_dmamap_load return %d", error));
747 			if (error && error!=EINPROGRESS) {
748 				os_printk("bus_dmamap_load error %d", error);
749 				cmdext_put(ext);
750 				ldm_free_cmds(pCmd);
751 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
752 				xpt_done(ccb);
753 			}
754 		}
755 		return;
756 	}
757 
758 	default:
759 		ccb->ccb_h.status = CAM_REQ_INVALID;
760 		break;
761 	}
762 
763 	xpt_done(ccb);
764 	return;
765 }
766 
767 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
768 {
769 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
770 
771 	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
772 
773 	switch (ccb->ccb_h.func_code) {
774 
775 	case XPT_SCSI_IO:
776 		hpt_lock_vbus(vbus_ext);
777 		hpt_scsi_io(vbus_ext, ccb);
778 		hpt_unlock_vbus(vbus_ext);
779 		return;
780 
781 	case XPT_RESET_BUS:
782 		hpt_lock_vbus(vbus_ext);
783 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
784 		hpt_unlock_vbus(vbus_ext);
785 		break;
786 
787 	case XPT_GET_TRAN_SETTINGS:
788 	case XPT_SET_TRAN_SETTINGS:
789 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
790 		break;
791 
792 	case XPT_CALC_GEOMETRY:
793 		cam_calc_geometry(&ccb->ccg, 1);
794 		break;
795 
796 	case XPT_PATH_INQ:
797 	{
798 		struct ccb_pathinq *cpi = &ccb->cpi;
799 
800 		cpi->version_num = 1;
801 		cpi->hba_inquiry = PI_SDTR_ABLE;
802 		cpi->target_sprt = 0;
803 		cpi->hba_misc = PIM_NOBUSRESET;
804 		cpi->hba_eng_cnt = 0;
805 		cpi->max_target = osm_max_targets;
806 		cpi->max_lun = 0;
807 		cpi->unit_number = cam_sim_unit(sim);
808 		cpi->bus_id = cam_sim_bus(sim);
809 		cpi->initiator_id = osm_max_targets;
810 		cpi->base_transfer_speed = 3300;
811 
812 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
813 		strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
814 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
815 		cpi->transport = XPORT_SPI;
816 		cpi->transport_version = 2;
817 		cpi->protocol = PROTO_SCSI;
818 		cpi->protocol_version = SCSI_REV_2;
819 		cpi->maxio = HPTRR_DFLTPHYS;
820 		cpi->ccb_h.status = CAM_REQ_CMP;
821 		break;
822 	}
823 
824 	default:
825 		ccb->ccb_h.status = CAM_REQ_INVALID;
826 		break;
827 	}
828 
829 	xpt_done(ccb);
830 	return;
831 }
832 
833 static void hpt_pci_intr(void *arg)
834 {
835 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
836 	hpt_lock_vbus(vbus_ext);
837 	ldm_intr((PVBUS)vbus_ext->vbus);
838 	hpt_unlock_vbus(vbus_ext);
839 }
840 
841 static void hpt_poll(struct cam_sim *sim)
842 {
843 	hpt_pci_intr(cam_sim_softc(sim));
844 }
845 
846 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
847 {
848 	KdPrint(("hpt_async"));
849 }
850 
851 static int hpt_shutdown(device_t dev)
852 {
853 	KdPrint(("hpt_shutdown(dev=%p)", dev));
854 	return 0;
855 }
856 
857 static int hpt_detach(device_t dev)
858 {
859 	/* we don't allow the driver to be unloaded. */
860 	return EBUSY;
861 }
862 
863 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
864 {
865 	arg->ioctl_cmnd = NULL;
866 	wakeup(arg);
867 }
868 
869 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
870 {
871 	ioctl_args->result = -1;
872 	ioctl_args->done = hpt_ioctl_done;
873 	ioctl_args->ioctl_cmnd = (void *)1;
874 
875 	hpt_lock_vbus(vbus_ext);
876 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
877 
878 	while (ioctl_args->ioctl_cmnd) {
879 		if (hpt_sleep(vbus_ext, ioctl_args, 0, "hptctl", HPT_OSM_TIMEOUT)==0)
880 			break;
881 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
882 		__hpt_do_tasks(vbus_ext);
883 	}
884 
885 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
886 
887 	hpt_unlock_vbus(vbus_ext);
888 }
889 
890 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
891 {
892 	PVBUS vbus;
893 	PVBUS_EXT vbus_ext;
894 
895 	ldm_for_each_vbus(vbus, vbus_ext) {
896 		__hpt_do_ioctl(vbus_ext, ioctl_args);
897 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
898 			return;
899 	}
900 }
901 
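/*
 * Convenience wrapper (a GCC statement expression) that fills an IOCTL_ARG,
 * runs it through hpt_do_ioctl() and evaluates to arg.result.
 */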
902 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
903 	IOCTL_ARG arg;\
904 	arg.dwIoControlCode = code;\
905 	arg.lpInBuffer = inbuf;\
906 	arg.lpOutBuffer = outbuf;\
907 	arg.nInBufferSize = insize;\
908 	arg.nOutBufferSize = outsize;\
909 	arg.lpBytesReturned = NULL;\
910 	hpt_do_ioctl(&arg);\
911 	arg.result;\
912 })
913 
914 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
915 
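/*
 * Query the logical device IDs.  The ioctl returns the number of IDs in
 * pIds[0] followed by the IDs themselves, so shift them down before handing
 * the count back to the caller.
 */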
916 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
917 {
918 	int i;
919 	HPT_U32 count = nMaxCount-1;
920 
921 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
922 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
923 		return -1;
924 
925 	nMaxCount = (int)pIds[0];
926 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
927 	return nMaxCount;
928 }
929 
930 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
931 {
932 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
933 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
934 }
935 
936 /* This does not logically belong in this file, but we want to reuse the ioctl interface. */
937 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
938 {
939 	LOGICAL_DEVICE_INFO_V3 devinfo;
940 	int i, result;
941 	DEVICEID param[2] = { id, 0 };
942 
943 	if (hpt_get_device_info_v3(id, &devinfo))
944 		return -1;
945 
946 	if (devinfo.Type!=LDT_ARRAY)
947 		return -1;
948 
949 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
950 		param[1] = AS_REBUILD_ABORT;
951 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
952 		param[1] = AS_VERIFY_ABORT;
953 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
954 		param[1] = AS_INITIALIZE_ABORT;
955 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
956 		param[1] = AS_TRANSFORM_ABORT;
957 	else
958 		return -1;
959 
960 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
961 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
962 				param, sizeof(param), NULL, 0);
963 
964 	for (i=0; i<devinfo.u.array.nDisk; i++)
965 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
966 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
967 
968 	return result;
969 }
970 
971 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
972 {
973 	DEVICEID ids[32];
974 	int i, count;
975 
976 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
977 
978 	for (i=0; i<count; i++)
979 		__hpt_stop_tasks(vbus_ext, ids[i]);
980 }
981 
982 static	d_open_t	hpt_open;
983 static	d_close_t	hpt_close;
984 static	d_ioctl_t	hpt_ioctl;
985 static  int 		hpt_rescan_bus(void);
986 static  void 		hpt_rescan_callback(struct cam_periph *periph, union ccb *ccb);
987 
988 static struct dev_ops hpt_ops = {
989 	{ driver_name, 0, 0 },
990 	.d_open =	hpt_open,
991 	.d_close =	hpt_close,
992 	.d_ioctl =	hpt_ioctl,
993 };
994 
995 static struct intr_config_hook hpt_ich;
996 
997 /*
998  * hpt_final_init() runs from the interrupt config hook, after every hpt_attach() has completed.
999  */
1000 static void hpt_final_init(void *dummy)
1001 {
1002 	int       i;
1003 	PVBUS_EXT vbus_ext;
1004 	PVBUS vbus;
1005 	PHBA hba;
1006 
1007 	/* Clear the config hook */
1008 	config_intrhook_disestablish(&hpt_ich);
1009 
1010 	/* allocate memory */
1011 	i = 0;
1012 	ldm_for_each_vbus(vbus, vbus_ext) {
1013 		if (hpt_alloc_mem(vbus_ext)) {
1014 			os_printk("out of memory");
1015 			return;
1016 		}
1017 		i++;
1018 	}
1019 
1020 	if (!i) {
1021 		if (bootverbose)
1022 			os_printk("no controller detected.");
1023 		return;
1024 	}
1025 
1026 	/* initializing hardware */
1027 	ldm_for_each_vbus(vbus, vbus_ext) {
1028 		/* make the per-vbus timer available before initializing the hardware */
1029 		callout_init(&vbus_ext->timer);
1030 		if (hpt_init_vbus(vbus_ext)) {
1031 			os_printk("failed to initialize hardware");
1032 			break; /* FIXME */
1033 		}
1034 	}
1035 
1036 	/* register CAM interface */
1037 	ldm_for_each_vbus(vbus, vbus_ext) {
1038 		struct cam_devq *devq;
1039 		struct ccb_setasync	ccb;
1040 
1041 		lockinit(&vbus_ext->lock, "hptsleeplock", 0, LK_CANRECURSE);
1042 		if (bus_dma_tag_create(NULL,/* parent */
1043 				4,	/* alignment */
1044 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1045 				BUS_SPACE_MAXADDR,	/* lowaddr */
1046 				BUS_SPACE_MAXADDR, 	/* highaddr */
1047 				NULL, NULL, 		/* filter, filterarg */
1048 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1049 				os_max_sg_descriptors,	/* nsegments */
1050 				0x10000,	/* maxsegsize */
1051 				BUS_DMA_WAITOK,		/* flags */
1052 				&vbus_ext->io_dmat	/* tag */))
1053 		{
1054 			return ;
1055 		}
1056 
1057 		for (i=0; i<os_max_queue_comm; i++) {
1058 			POS_CMDEXT ext = (POS_CMDEXT)kmalloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1059 			if (!ext) {
1060 				os_printk("Can't alloc cmdext(%d)", i);
1061 				return ;
1062 			}
1063 			ext->vbus_ext = vbus_ext;
1064 			ext->next = vbus_ext->cmdext_list;
1065 			vbus_ext->cmdext_list = ext;
1066 
1067 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1068 				os_printk("Can't create dma map(%d)", i);
1069 				return ;
1070 			}
1071 		}
1072 
1073 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1074 			os_printk("cam_simq_alloc failed");
1075 			return ;
1076 		}
1077 
1078 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1079 				vbus_ext, 0, &sim_mplock, os_max_queue_comm, /*tagged*/8,  devq);
1080 		cam_simq_release(devq);
1081 
1082 		if (!vbus_ext->sim) {
1083 			os_printk("cam_sim_alloc failed");
1084 			return ;
1085 		}
1086 
1087 		if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) {
1088 			os_printk("xpt_bus_register failed");
1089 			cam_sim_free(vbus_ext->sim);
1090 			vbus_ext->sim = NULL;
1091 			return ;
1092 		}
1093 
1094 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1095 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1096 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1097 		{
1098 			os_printk("xpt_create_path failed");
1099 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1100 			cam_sim_free(vbus_ext->sim);
1101 			vbus_ext->sim = NULL;
1102 			return ;
1103 		}
1104 
1105 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1106 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1107 		ccb.event_enable = AC_LOST_DEVICE;
1108 		ccb.callback = hpt_async;
1109 		ccb.callback_arg = vbus_ext;
1110 		xpt_action((union ccb *)&ccb);
1111 
1112 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1113 			int rid = 0;
1114 			if ((hba->irq_res = bus_alloc_resource(hba->pcidev,
1115 				SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1116 			{
1117 				os_printk("can't allocate interrupt");
1118 				return ;
1119 			}
1120 
1121 			if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
1122 				hpt_pci_intr, vbus_ext, &hba->irq_handle, NULL))
1123 			{
1124 				os_printk("can't set up interrupt");
1125 				return ;
1126 			}
1127 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1128 		}
1129 
1130 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1131 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1132 		if (!vbus_ext->shutdown_eh)
1133 			os_printk("Shutdown event registration failed");
1134 	}
1135 
1136 	ldm_for_each_vbus(vbus, vbus_ext) {
1137 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1138 		if (vbus_ext->tasks)
1139 			TASK_ENQUEUE(&vbus_ext->worker);
1140 	}
1141 
1142 	make_dev(&hpt_ops, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1143 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1144 }
1145 
1146 #if defined(KLD_MODULE)
1147 
1148 typedef struct driverlink *driverlink_t;
1149 struct driverlink {
1150 	kobj_class_t	driver;
1151 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1152 };
1153 
1154 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1155 
1156 struct devclass {
1157 	TAILQ_ENTRY(devclass) link;
1158 	devclass_t	parent;		/* parent in devclass hierarchy */
1159 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1160 	char		*name;
1161 	device_t	*devices;	/* array of devices indexed by unit */
1162 	int		maxunit;	/* size of devices array */
1163 };
1164 
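/*
 * Move this driver to the front of the "pci" devclass driver list so it gets
 * to probe the hardware before a driver compiled into the kernel does.  Note
 * that this depends on the devclass/driverlink layouts copied above staying
 * in sync with the kernel's own definitions.
 */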
1165 static void override_kernel_driver(void)
1166 {
1167 	driverlink_t dl, dlfirst;
1168 	driver_t *tmpdriver;
1169 	devclass_t dc = devclass_find("pci");
1170 
1171 	if (dc){
1172 		dlfirst = TAILQ_FIRST(&dc->drivers);
1173 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1174 			if(strcmp(dl->driver->name, driver_name) == 0) {
1175 				tmpdriver=dl->driver;
1176 				dl->driver=dlfirst->driver;
1177 				dlfirst->driver=tmpdriver;
1178 				break;
1179 			}
1180 		}
1181 	}
1182 }
1183 
1184 #else
1185 #define override_kernel_driver()
1186 #endif
1187 
1188 static void hpt_init(void *dummy)
1189 {
1190 	if (bootverbose)
1191 		os_printk("%s %s", driver_name_long, driver_ver);
1192 
1193 	override_kernel_driver();
1194 	init_config();
1195 
1196 	hpt_ich.ich_func = hpt_final_init;
1197 	hpt_ich.ich_arg = NULL;
1198 	hpt_ich.ich_desc = "hptrr";
1199 	if (config_intrhook_establish(&hpt_ich) != 0) {
1200 		kprintf("%s: cannot establish configuration hook\n",
1201 		    driver_name_long);
1202 	}
1203 
1204 }
1205 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1206 
1207 /*
1208  * CAM driver interface
1209  */
1210 static device_method_t driver_methods[] = {
1211 	/* Device interface */
1212 	DEVMETHOD(device_probe,		hpt_probe),
1213 	DEVMETHOD(device_attach,	hpt_attach),
1214 	DEVMETHOD(device_detach,	hpt_detach),
1215 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1216 	DEVMETHOD_END
1217 };
1218 
1219 static driver_t hpt_pci_driver = {
1220 	driver_name,
1221 	driver_methods,
1222 	sizeof(HBA)
1223 };
1224 
1225 static devclass_t	hpt_devclass;
1226 
1227 #ifndef TARGETNAME
1228 #error "no TARGETNAME found"
1229 #endif
1230 
1231 /* use these wrappers so that TARGETNAME is macro-expanded before token pasting */
1232 #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6)
1233 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1234 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1235 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, NULL, NULL);
1236 __MODULE_VERSION(TARGETNAME, 1);
1237 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1238 
1239 typedef struct cdev * ioctl_dev_t;
1240 
1241 typedef	struct thread *	ioctl_thread_t;
1242 
1243 static int hpt_open(struct dev_open_args *ap)
1244 {
1245 	return 0;
1246 }
1247 
1248 static int hpt_close(struct dev_close_args *ap)
1249 {
1250 	return 0;
1251 }
1252 
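/*
 * Character-device ioctl entry point: validate the HPT_IOCTL_PARAM magic,
 * copy the user buffers in, dispatch through hpt_do_ioctl() and copy the
 * result (and returned byte count) back out.
 */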
1253 static int hpt_ioctl(struct dev_ioctl_args *ap)
1254 {
1255 	u_long cmd = ap->a_cmd;
1256 	caddr_t data = ap->a_data;
1257 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1258 	IOCTL_ARG ioctl_args;
1259 	HPT_U32 bytesReturned;
1260 
1261 	switch (cmd){
1262 	case HPT_DO_IOCONTROL:
1263 	{
1264 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1265 			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
1266 				piop->dwIoControlCode,
1267 				piop->lpInBuffer,
1268 				piop->nInBufferSize,
1269 				piop->lpOutBuffer,
1270 				piop->nOutBufferSize));
1271 
1272 			memset(&ioctl_args, 0, sizeof(ioctl_args));
1273 
1274 			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1275 			ioctl_args.nInBufferSize = piop->nInBufferSize;
1276 			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1277 			ioctl_args.lpBytesReturned = &bytesReturned;
1278 
1279 			if (ioctl_args.nInBufferSize) {
1280 				ioctl_args.lpInBuffer = kmalloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1281 				if (!ioctl_args.lpInBuffer)
1282 					goto invalid;
1283 				if (copyin((void*)piop->lpInBuffer,
1284 						ioctl_args.lpInBuffer, piop->nInBufferSize))
1285 					goto invalid;
1286 			}
1287 
1288 			if (ioctl_args.nOutBufferSize) {
1289 				ioctl_args.lpOutBuffer = kmalloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK);
1290 				if (!ioctl_args.lpOutBuffer)
1291 					goto invalid;
1292 			}
1293 
1294 			get_mplock();
1295 
1296 			hpt_do_ioctl(&ioctl_args);
1297 
1298 			rel_mplock();
1299 
1300 			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1301 				if (piop->nOutBufferSize) {
1302 					if (copyout(ioctl_args.lpOutBuffer,
1303 						(void*)piop->lpOutBuffer, piop->nOutBufferSize))
1304 						goto invalid;
1305 				}
1306 				if (piop->lpBytesReturned) {
1307 					if (copyout(&bytesReturned,
1308 						(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1309 						goto invalid;
1310 				}
1311 				if (ioctl_args.lpInBuffer) kfree(ioctl_args.lpInBuffer, M_DEVBUF);
1312 				if (ioctl_args.lpOutBuffer) kfree(ioctl_args.lpOutBuffer, M_DEVBUF);
1313 				return 0;
1314 			}
1315 invalid:
1316 			if (ioctl_args.lpInBuffer) kfree(ioctl_args.lpInBuffer, M_DEVBUF);
1317 			if (ioctl_args.lpOutBuffer) kfree(ioctl_args.lpOutBuffer, M_DEVBUF);
1318 			return EFAULT;
1319 		}
1320 		return EFAULT;
1321 	}
1322 
1323 	case HPT_SCAN_BUS:
1324 	{
1325 		return hpt_rescan_bus();
1326 	}
1327 	default:
1328 		KdPrint(("invalid command!"));
1329 		return EFAULT;
1330 	}
1331 
1332 }
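
/*
 * Illustrative userland use of the ioctl interface above.  This is only a
 * sketch: it assumes the control node created by make_dev() in
 * hpt_final_init() is /dev/hptrr (i.e. driver_name is "hptrr") and that
 * hptintf.h declares the HPT_IOCTL_PARAM fields with types compatible with
 * these assignments:
 *
 *	HPT_IOCTL_PARAM param;
 *	HPT_U32 count = 31, nret;
 *	DEVICEID ids[32];
 *	int fd = open("/dev/hptrr", O_RDWR);
 *
 *	memset(&param, 0, sizeof(param));
 *	param.Magic = HPT_IOCTL_MAGIC;
 *	param.dwIoControlCode = HPT_IOCTL_GET_LOGICAL_DEVICES;
 *	param.lpInBuffer = &count;
 *	param.nInBufferSize = sizeof(count);
 *	param.lpOutBuffer = ids;
 *	param.nOutBufferSize = sizeof(ids);
 *	param.lpBytesReturned = &nret;
 *	ioctl(fd, HPT_DO_IOCONTROL, &param);
 */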
1333 
1334 static void	hpt_rescan_callback(struct cam_periph *periph, union ccb *ccb)
1335 {
1336 	xpt_free_path(ccb->ccb_h.path);
1337 	xpt_free_ccb(ccb);
1338 }
1339 
1340 static int	hpt_rescan_bus(void)
1341 {
1342 	union ccb			*ccb;
1343 	PVBUS 				vbus;
1344 	PVBUS_EXT			vbus_ext;
1345 
1346 	get_mplock();
1347 
1348 	ldm_for_each_vbus(vbus, vbus_ext) {
1349 		if ((ccb = xpt_alloc_ccb()) == NULL) {
			rel_mplock();
1350 			return(ENOMEM);
		}
1351 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1352 		    cam_sim_path(vbus_ext->sim),
1353 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1354 			xpt_free_ccb(ccb);
			rel_mplock();
1355 			return(EIO);
1356 		}
1357 
1358 		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1359 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1360 		ccb->ccb_h.cbfcnp = hpt_rescan_callback;
1361 		ccb->crcn.flags = CAM_FLAG_NONE;
1362 		xpt_action(ccb); /* scan is now in progress */
1363 	}
1364 
	rel_mplock();
1365 	return(0);
1366 }
1367