xref: /dragonfly/sys/dev/raid/hptrr/hptrr_osm_bsd.c (revision 89a89091)
1 /*
2  * Copyright (c) HighPoint Technologies, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/hptrr/hptrr_osm_bsd.c,v 1.10 2012/06/17 02:46:27 eadler Exp $
27  */
28 
29 #include <dev/raid/hptrr/hptrr_config.h>
30 /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $
31  *
32  * HighPoint RAID Driver for FreeBSD
33  * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved.
34  */
35 #include <dev/raid/hptrr/os_bsd.h>
36 #include <dev/raid/hptrr/hptintf.h>
37 
38 static int attach_generic = 0;
39 TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic);
40 
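/*
 * PCI probe: match the device against every supported ID reported by the
 * registered HIMs; on a hit, set the device description and initialize the
 * softc as this adapter's HBA extension.
 */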
41 static int hpt_probe(device_t dev)
42 {
43 	PCI_ID pci_id;
44 	HIM *him;
45 	int i;
46 	PHBA hba;
47 
48 	/* Some of the supported chips are also used by vendors other than HPT. */
49 	if (pci_get_vendor(dev) != 0x1103 && !attach_generic)
50 		return (ENXIO);
51 	for (him = him_list; him; him = him->next) {
52 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
53 			if ((pci_get_vendor(dev) == pci_id.vid) &&
54 				(pci_get_device(dev) == pci_id.did)){
55 				KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
56 					pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
57 				));
58 				device_set_desc(dev, him->name);
59 				hba = (PHBA)device_get_softc(dev);
60 				memset(hba, 0, sizeof(HBA));
61 				hba->ext_type = EXT_TYPE_HBA;
62 				hba->ldm_adapter.him = him;
63 				return 0;
64 			}
65 		}
66 	}
67 
68 	return (ENXIO);
69 }
70 
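/*
 * PCI attach: allocate and create the HIM adapter object, register it with
 * the LDM layer (creating a new virtual bus if necessary), and link this HBA
 * into the HBA list of its vbus_ext.
 */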
71 static int hpt_attach(device_t dev)
72 {
73 	PHBA hba = (PHBA)device_get_softc(dev);
74 	HIM *him = hba->ldm_adapter.him;
75 	PCI_ID pci_id;
76 	HPT_UINT size;
77 	PVBUS vbus;
78 	PVBUS_EXT vbus_ext;
79 
80 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
81 
82 	pci_enable_busmaster(dev);
83 
84 	pci_id.vid = pci_get_vendor(dev);
85 	pci_id.did = pci_get_device(dev);
86 	pci_id.rev = pci_get_revid(dev);
87 
88 	size = him->get_adapter_size(&pci_id);
89 	hba->ldm_adapter.him_handle = kmalloc(size, M_DEVBUF, M_WAITOK);
90 	if (!hba->ldm_adapter.him_handle)
91 		return ENXIO;
92 
93 	hba->pcidev = dev;
94 	hba->pciaddr.tree = 0;
95 	hba->pciaddr.bus = pci_get_bus(dev);
96 	hba->pciaddr.device = pci_get_slot(dev);
97 	hba->pciaddr.function = pci_get_function(dev);
98 
99 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
100 		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
101 		return -1;
102 	}
103 
104 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
105 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
106 
107 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
108 		size = ldm_get_vbus_size();
109 		vbus_ext = kmalloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
110 		if (!vbus_ext) {
111 			kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
112 			return -1;
113 		}
114 		memset(vbus_ext, 0, sizeof(VBUS_EXT));
115 		vbus_ext->ext_type = EXT_TYPE_VBUS;
116 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
117 		ldm_register_adapter(&hba->ldm_adapter);
118 	}
119 
120 	ldm_for_each_vbus(vbus, vbus_ext) {
121 		if (hba->ldm_adapter.vbus==vbus) {
122 			hba->vbus_ext = vbus_ext;
123 			hba->next = vbus_ext->hba_list;
124 			vbus_ext->hba_list = hba;
125 			break;
126 		}
127 	}
128 	return 0;
129 }
130 
131 /*
132  * It might be better to use bus_dmamem_alloc() to allocate DMA memory,
133  * but there are currently some problems with that approach (alignment, etc.).
134  */
135 static __inline void *__get_free_pages(int order)
136 {
137 	/* don't use low memory - other devices may get starved */
138 	return contigmalloc(PAGE_SIZE<<order,
139 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
140 }
141 
142 static __inline void free_pages(void *p, int order)
143 {
144 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
145 }
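/*
 * As noted above, bus_dmamem_alloc(9) could in principle replace the
 * contigmalloc()-based page allocator.  A minimal, untested sketch is kept
 * below for reference; the tag "pool_dmat" and the helper name are
 * illustrative only and are not part of this driver.
 */
#if 0
static bus_dma_tag_t pool_dmat;		/* hypothetical page-pool tag */

static void *dma_pool_page_alloc(bus_dmamap_t *mapp)
{
	void *va;

	/* pool_dmat would be created with alignment and maxsize of PAGE_SIZE */
	if (bus_dmamem_alloc(pool_dmat, &va, BUS_DMA_WAITOK, mapp))
		return NULL;
	return va;
}
#endif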
146 
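/*
 * Pre-allocate the per-vbus memory pools: plain freelist entries come from
 * kmalloc(), while DMA freelists and the cache pages are carved out of
 * page-aligned contiguous memory obtained via __get_free_pages().
 */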
147 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
148 {
149 	PHBA hba;
150 	struct freelist *f;
151 	HPT_UINT i;
152 	void **p;
153 
154 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
155 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
156 
157 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
158 
159 	for (f=vbus_ext->freelist_head; f; f=f->next) {
160 		KdPrint(("%s: %d*%d=%d bytes",
161 			f->tag, f->count, f->size, f->count*f->size));
162 		for (i=0; i<f->count; i++) {
163 			p = (void **)kmalloc(f->size, M_DEVBUF, M_WAITOK);
164 			if (!p)	return (ENXIO);
165 			*p = f->head;
166 			f->head = p;
167 		}
168 	}
169 
170 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
171 		int order, size, j;
172 
173 		HPT_ASSERT((f->size & (f->alignment-1))==0);
174 
175 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
176 
177 		KdPrint(("%s: %d*%d=%d bytes, order %d",
178 			f->tag, f->count, f->size, f->count*f->size, order));
179 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
180 
181 		for (i=0; i<f->count;) {
182 			p = (void **)__get_free_pages(order);
183 			if (!p) return -1;
184 			for (j = size/f->size; j && i<f->count; i++,j--) {
185 				*p = f->head;
186 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
187 				f->head = p;
188 				p = (void **)((unsigned long)p + f->size);
189 			}
190 		}
191 	}
192 
193 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
194 
195 	for (i=0; i<os_max_cache_pages; i++) {
196 		p = (void **)__get_free_pages(0);
197 		if (!p) return -1;
198 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
199 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
200 	}
201 
202 	return 0;
203 }
204 
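/*
 * Release everything hpt_alloc_mem() set up; in debug builds, report any
 * freelist whose element count no longer matches its reserved count.
 */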
205 static void hpt_free_mem(PVBUS_EXT vbus_ext)
206 {
207 	struct freelist *f;
208 	void *p;
209 	int i;
210 	BUS_ADDRESS bus;
211 
212 	for (f=vbus_ext->freelist_head; f; f=f->next) {
213 #if DBG
214 		if (f->count!=f->reserved_count) {
215 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
216 		}
217 #endif
218 		while ((p=freelist_get(f)))
219 			kfree(p, M_DEVBUF);
220 	}
221 
222 	for (i=0; i<os_max_cache_pages; i++) {
223 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
224 		HPT_ASSERT(p);
225 		free_pages(p, 0);
226 	}
227 
228 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
229 		int order, size;
230 #if DBG
231 		if (f->count!=f->reserved_count) {
232 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
233 		}
234 #endif
235 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
236 
237 		while ((p=freelist_get_dma(f, &bus))) {
238 			if (order)
239 				free_pages(p, order);
240 			else {
241 			/* can't free immediately since other blocks in this page may still be in the list */
242 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
243 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
244 			}
245 		}
246 	}
247 
248 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
249 		free_pages(p, 0);
250 }
251 
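/* Initialize every HBA on the vbus through its HIM, then the vbus itself. */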
252 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
253 {
254 	PHBA hba;
255 
256 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
257 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
258 			KdPrint(("failed to initialize %p", hba));
259 			return -1;
260 		}
261 
262 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
263 	return 0;
264 }
265 
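/*
 * Completion routine for flush commands: requeue to the transform target if
 * the array is being transformed, otherwise flag completion and wake the
 * sleeper in hpt_flush_vdev().
 */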
266 static void hpt_flush_done(PCOMMAND pCmd)
267 {
268 	PVDEV vd = pCmd->target;
269 
270 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
271 		vd = vd->u.array.transform->target;
272 		HPT_ASSERT(vd);
273 		pCmd->target = vd;
274 		pCmd->Result = RETURN_PENDING;
275 		vdev_queue_cmd(pCmd);
276 		return;
277 	}
278 
279 	*(int *)pCmd->priv = 1;
280 	wakeup(pCmd);
281 }
282 
283 /*
284  * flush a vdev (without retry).
285  */
286 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
287 {
288 	PCOMMAND pCmd;
289 	int result = 0, done;
290 	HPT_UINT count;
291 
292 	KdPrint(("flushing dev %p", vd));
293 
294 	hpt_lock_vbus(vbus_ext);
295 
296 	if (mIsArray(vd->type) && vd->u.array.transform)
297 		count = MAX(vd->u.array.transform->source->cmds_per_request,
298 					vd->u.array.transform->target->cmds_per_request);
299 	else
300 		count = vd->cmds_per_request;
301 
302 	pCmd = ldm_alloc_cmds(vd->vbus, count);
303 
304 	if (!pCmd) {
305 		hpt_unlock_vbus(vbus_ext);
306 		return -1;
307 	}
308 
309 	pCmd->type = CMD_TYPE_FLUSH;
310 	pCmd->flags.hard_flush = 1;
311 	pCmd->target = vd;
312 	pCmd->done = hpt_flush_done;
313 	done = 0;
314 	pCmd->priv = &done;
315 
316 	ldm_queue_cmd(pCmd);
317 
318 	if (!done) {
319 		while (hpt_sleep(vbus_ext, pCmd, 0, "hptfls", HPT_OSM_TIMEOUT)) {
320 			ldm_reset_vbus(vd->vbus);
321 		}
322 	}
323 
324 	KdPrint(("flush result %d", pCmd->Result));
325 
326 	if (pCmd->Result!=RETURN_SUCCESS)
327 		result = -1;
328 
329 	ldm_free_cmds(pCmd);
330 
331 	hpt_unlock_vbus(vbus_ext);
332 
333 	return result;
334 }
335 
336 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
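/*
 * Shutdown handler for a virtual bus: stop background tasks, flush every
 * target, shut down and release the LDM vbus, tear down interrupts, and free
 * all per-vbus memory.
 */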
337 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
338 {
339 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
340 	PHBA hba;
341 	int i;
342 
343 	KdPrint(("hpt_shutdown_vbus"));
344 
345 	/* stop all ctl tasks and disable the worker taskqueue */
346 	hpt_stop_tasks(vbus_ext);
347 	vbus_ext->worker.ta_context = 0;
348 
349 	/* flush devices */
350 	for (i=0; i<osm_max_targets; i++) {
351 		PVDEV vd = ldm_find_target(vbus, i);
352 		if (vd) {
353 			/* retry once */
354 			if (hpt_flush_vdev(vbus_ext, vd))
355 				hpt_flush_vdev(vbus_ext, vd);
356 		}
357 	}
358 
359 	hpt_lock_vbus(vbus_ext);
360 	ldm_shutdown(vbus);
361 	hpt_unlock_vbus(vbus_ext);
362 
363 	ldm_release_vbus(vbus);
364 
365 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
366 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
367 
368 	hpt_free_mem(vbus_ext);
369 
370 	while ((hba=vbus_ext->hba_list)) {
371 		vbus_ext->hba_list = hba->next;
372 		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
373 	}
374 
375 	kfree(vbus_ext, M_DEVBUF);
376 	KdPrint(("hpt_shutdown_vbus done"));
377 }
378 
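/* Run and drain the deferred OSM task list; callers hold the vbus lock. */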
379 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
380 {
381 	OSM_TASK *tasks;
382 
383 	tasks = vbus_ext->tasks;
384 	vbus_ext->tasks = 0;
385 
386 	while (tasks) {
387 		OSM_TASK *t = tasks;
388 		tasks = t->next;
389 		t->next = 0;
390 		t->func(vbus_ext->vbus, t->data);
391 	}
392 }
393 
394 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
395 {
396 	if(vbus_ext){
397 		hpt_lock_vbus(vbus_ext);
398 		__hpt_do_tasks(vbus_ext);
399 		hpt_unlock_vbus(vbus_ext);
400 	}
401 }
402 
403 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
404 static void hpt_poll(struct cam_sim *sim);
405 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
406 static void hpt_pci_intr(void *arg);
407 
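/* Per-command OS extensions are kept on a simple singly linked free list. */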
408 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
409 {
410 	POS_CMDEXT p = vbus_ext->cmdext_list;
411 	if (p)
412 		vbus_ext->cmdext_list = p->next;
413 	return p;
414 }
415 
416 static __inline void cmdext_put(POS_CMDEXT p)
417 {
418 	p->next = p->vbus_ext->cmdext_list;
419 	p->vbus_ext->cmdext_list = p;
420 }
421 
422 static void hpt_timeout(void *arg)
423 {
424 	PCOMMAND pCmd = (PCOMMAND)arg;
425 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
426 
427 	KdPrint(("pCmd %p timeout", pCmd));
428 
429 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
430 }
431 
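/*
 * Command completion: cancel the timeout, translate the HIM result into a
 * CAM status, sync and unload the data DMA map, then complete the CCB.
 */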
432 static void os_cmddone(PCOMMAND pCmd)
433 {
434 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
435 	union ccb *ccb = ext->ccb;
436 
437 	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));
438 
439 	callout_stop(&ccb->ccb_h.timeout_ch);
440 
441 	switch(pCmd->Result) {
442 	case RETURN_SUCCESS:
443 		ccb->ccb_h.status = CAM_REQ_CMP;
444 		break;
445 	case RETURN_BAD_DEVICE:
446 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
447 		break;
448 	case RETURN_DEVICE_BUSY:
449 		ccb->ccb_h.status = CAM_BUSY;
450 		break;
451 	case RETURN_INVALID_REQUEST:
452 		ccb->ccb_h.status = CAM_REQ_INVALID;
453 		break;
454 	case RETURN_SELECTION_TIMEOUT:
455 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
456 		break;
457 	case RETURN_RETRY:
458 		ccb->ccb_h.status = CAM_BUSY;
459 		break;
460 	default:
461 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
462 		break;
463 	}
464 
465 	if (pCmd->flags.data_in) {
466 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
467 	}
468 	else if (pCmd->flags.data_out) {
469 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
470 	}
471 
472 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
473 
474 	cmdext_put(ext);
475 	ldm_free_cmds(pCmd);
476 	xpt_done(ccb);
477 }
478 
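/*
 * Build a logical (virtual address) scatter/gather list from the CCB data;
 * physical S/G lists are always supplied up front, so that case asserts.
 */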
479 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
480 {
481 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
482 	union ccb *ccb = ext->ccb;
483 	bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
484 	int idx;
485 
486 	if(logical)	{
487 		if (ccb->ccb_h.flags & CAM_DATA_PHYS)
488 			panic("physical address unsupported");
489 
490 		if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
491 			if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
492 				panic("physical address unsupported");
493 
494 			for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
495 				os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr);
496 				pSg[idx].size = sgList[idx].ds_len;
497 				pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
498 			}
499 		}
500 		else {
501 			os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
502 			pSg->size = ccb->csio.dxfer_len;
503 			pSg->eot = 1;
504 		}
505 		return TRUE;
506 	}
507 
508 	/* since we supply physical SG lists ourselves, nobody should ask us to build one here */
509 	HPT_ASSERT(0);
510 	return FALSE;
511 }
512 
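/*
 * bus_dmamap_load() callback: fill in the physical S/G list, pre-sync the
 * data buffers, arm the timeout and queue the command to the LDM layer.
 */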
513 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
514 {
515 	PCOMMAND pCmd = (PCOMMAND)arg;
516 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
517 	PSG psg = pCmd->psg;
518 	int idx;
519 
520 	HPT_ASSERT(pCmd->flags.physical_sg);
521 
522 	if (error || nsegs == 0)
523 		panic("busdma error");
524 
525 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
526 
527 	for (idx = 0; idx < nsegs; idx++, psg++) {
528 		psg->addr.bus = segs[idx].ds_addr;
529 		psg->size = segs[idx].ds_len;
530 		psg->eot = 0;
531 	}
532 	psg[-1].eot = 1;
533 
534 	if (pCmd->flags.data_in) {
535 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD);
536 	}
537 	else if (pCmd->flags.data_out) {
538 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE);
539 	}
540 
541 	callout_reset(&ext->ccb->ccb_h.timeout_ch, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
542 	ldm_queue_cmd(pCmd);
543 }
544 
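/*
 * Translate an XPT_SCSI_IO request: INQUIRY, READ CAPACITY and a few other
 * commands are answered directly, while reads and writes (plus opcodes 0x13
 * and 0x2f) are converted into LDM commands and queued to the virtual device.
 */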
545 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
546 {
547 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
548 	PVDEV vd;
549 	PCOMMAND pCmd;
550 	POS_CMDEXT ext;
551 	HPT_U8 *cdb;
552 
553 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
554 		cdb = ccb->csio.cdb_io.cdb_ptr;
555 	else
556 		cdb = ccb->csio.cdb_io.cdb_bytes;
557 
558 	KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
559 		ccb,
560 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
561 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
562 	));
563 
564 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
565 	if (ccb->ccb_h.target_lun != 0 ||
566 		ccb->ccb_h.target_id >= osm_max_targets ||
567 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
568 	{
569 		ccb->ccb_h.status = CAM_TID_INVALID;
570 		xpt_done(ccb);
571 		return;
572 	}
573 
574 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
575 
576 	if (!vd) {
577 		ccb->ccb_h.status = CAM_TID_INVALID;
578 		xpt_done(ccb);
579 		return;
580 	}
581 
582 	switch (cdb[0]) {
583 	case TEST_UNIT_READY:
584 	case START_STOP_UNIT:
585 	case SYNCHRONIZE_CACHE:
586 		ccb->ccb_h.status = CAM_REQ_CMP;
587 		break;
588 
589 	case INQUIRY:
590 		{
591 			PINQUIRYDATA inquiryData;
592 			memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
593 			inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
594 
595 			inquiryData->AdditionalLength = 31;
596 			inquiryData->CommandQueue = 1;
597 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
598 			memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);
599 
600 			if (vd->target_id / 10) {
601 				inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
602 				inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
603 			}
604 			else
605 				inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';
606 
607 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
608 
609 			ccb->ccb_h.status = CAM_REQ_CMP;
610 		}
611 		break;
612 
613 	case READ_CAPACITY:
614 	{
615 		HPT_U8 *rbuf = ccb->csio.data_ptr;
616 		HPT_U32 cap;
617 
618 		if (vd->capacity>0xfffffffful)
619 			cap = 0xfffffffful;
620 		else
621 			cap = vd->capacity - 1;
622 
623 		rbuf[0] = (HPT_U8)(cap>>24);
624 		rbuf[1] = (HPT_U8)(cap>>16);
625 		rbuf[2] = (HPT_U8)(cap>>8);
626 		rbuf[3] = (HPT_U8)cap;
627 		rbuf[4] = 0;
628 		rbuf[5] = 0;
629 		rbuf[6] = 2;
630 		rbuf[7] = 0;
631 
632 		ccb->ccb_h.status = CAM_REQ_CMP;
633 		break;
634 	}
635 
636 	case SERVICE_ACTION_IN:
637 	{
638 		HPT_U8 *rbuf = ccb->csio.data_ptr;
639 		HPT_U64	cap = vd->capacity - 1;
640 
641 		rbuf[0] = (HPT_U8)(cap>>56);
642 		rbuf[1] = (HPT_U8)(cap>>48);
643 		rbuf[2] = (HPT_U8)(cap>>40);
644 		rbuf[3] = (HPT_U8)(cap>>32);
645 		rbuf[4] = (HPT_U8)(cap>>24);
646 		rbuf[5] = (HPT_U8)(cap>>16);
647 		rbuf[6] = (HPT_U8)(cap>>8);
648 		rbuf[7] = (HPT_U8)cap;
649 		rbuf[8] = 0;
650 		rbuf[9] = 0;
651 		rbuf[10] = 2;
652 		rbuf[11] = 0;
653 
654 		ccb->ccb_h.status = CAM_REQ_CMP;
655 		break;
656 	}
657 
658 	case READ_6:
659 	case READ_10:
660 	case READ_16:
661 	case WRITE_6:
662 	case WRITE_10:
663 	case WRITE_16:
664 	case 0x13:
665 	case 0x2f:
666 	{
667 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
668 		if(!pCmd){
669 			KdPrint(("Failed to allocate command!"));
670 			ccb->ccb_h.status = CAM_BUSY;
671 			break;
672 		}
673 
674 		switch (cdb[0])	{
675 		case READ_6:
676 		case WRITE_6:
677 		case 0x13:
678 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
679 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
680 			break;
681 		case READ_16:
682 		case WRITE_16:
683 		{
684 			HPT_U64 block =
685 				((HPT_U64)cdb[2]<<56) |
686 				((HPT_U64)cdb[3]<<48) |
687 				((HPT_U64)cdb[4]<<40) |
688 				((HPT_U64)cdb[5]<<32) |
689 				((HPT_U64)cdb[6]<<24) |
690 				((HPT_U64)cdb[7]<<16) |
691 				((HPT_U64)cdb[8]<<8) |
692 				((HPT_U64)cdb[9]);
693 			pCmd->uCmd.Ide.Lba = block;
694 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
695 			break;
696 		}
697 
698 		default:
699 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
700 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
701 			break;
702 		}
703 
704 		switch (cdb[0]) {
705 		case READ_6:
706 		case READ_10:
707 		case READ_16:
708 			pCmd->flags.data_in = 1;
709 			break;
710 		case WRITE_6:
711 		case WRITE_10:
712 		case WRITE_16:
713 			pCmd->flags.data_out = 1;
714 			break;
715 		}
716 		pCmd->priv = ext = cmdext_get(vbus_ext);
717 		HPT_ASSERT(ext);
718 		ext->ccb = ccb;
719 		pCmd->target = vd;
720 		pCmd->done = os_cmddone;
721 		pCmd->buildsgl = os_buildsgl;
722 
723 		pCmd->psg = ext->psg;
724 
725 		if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
726 			int idx;
727 			bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
728 
729 			if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
730 				pCmd->flags.physical_sg = 1;
731 
732 			for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
733 				pCmd->psg[idx].addr.bus = sgList[idx].ds_addr;
734 				pCmd->psg[idx].size = sgList[idx].ds_len;
735 				pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
736 			}
737 
738 			callout_reset(&ccb->ccb_h.timeout_ch, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
739 			ldm_queue_cmd(pCmd);
740 		}
741 		else {
742 			int error;
743 			pCmd->flags.physical_sg = 1;
744 			error = bus_dmamap_load(vbus_ext->io_dmat,
745 						ext->dma_map,
746 						ccb->csio.data_ptr, ccb->csio.dxfer_len,
747 						hpt_io_dmamap_callback, pCmd,
748 					BUS_DMA_WAITOK
749 					);
750 			KdPrint(("bus_dmamap_load return %d", error));
751 			if (error && error!=EINPROGRESS) {
752 				os_printk("bus_dmamap_load error %d", error);
753 				cmdext_put(ext);
754 				ldm_free_cmds(pCmd);
755 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
756 				xpt_done(ccb);
757 			}
758 		}
759 		return;
760 	}
761 
762 	default:
763 		ccb->ccb_h.status = CAM_REQ_INVALID;
764 		break;
765 	}
766 
767 	xpt_done(ccb);
768 	return;
769 }
770 
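/* CAM SIM action entry point: dispatch incoming CCBs to the handlers above. */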
771 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
772 {
773 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
774 
775 	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
776 
777 	switch (ccb->ccb_h.func_code) {
778 
779 	case XPT_SCSI_IO:
780 		hpt_lock_vbus(vbus_ext);
781 		hpt_scsi_io(vbus_ext, ccb);
782 		hpt_unlock_vbus(vbus_ext);
783 		return;
784 
785 	case XPT_RESET_BUS:
786 		hpt_lock_vbus(vbus_ext);
787 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
788 		hpt_unlock_vbus(vbus_ext);
789 		break;
790 
791 	case XPT_GET_TRAN_SETTINGS:
792 	case XPT_SET_TRAN_SETTINGS:
793 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
794 		break;
795 
796 	case XPT_CALC_GEOMETRY:
797 		cam_calc_geometry(&ccb->ccg, 1);
798 		break;
799 
800 	case XPT_PATH_INQ:
801 	{
802 		struct ccb_pathinq *cpi = &ccb->cpi;
803 
804 		cpi->version_num = 1;
805 		cpi->hba_inquiry = PI_SDTR_ABLE;
806 		cpi->target_sprt = 0;
807 		cpi->hba_misc = PIM_NOBUSRESET;
808 		cpi->hba_eng_cnt = 0;
809 		cpi->max_target = osm_max_targets;
810 		cpi->max_lun = 0;
811 		cpi->unit_number = cam_sim_unit(sim);
812 		cpi->bus_id = cam_sim_bus(sim);
813 		cpi->initiator_id = osm_max_targets;
814 		cpi->base_transfer_speed = 3300;
815 
816 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
817 		strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
818 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
819 		cpi->transport = XPORT_SPI;
820 		cpi->transport_version = 2;
821 		cpi->protocol = PROTO_SCSI;
822 		cpi->protocol_version = SCSI_REV_2;
823 		cpi->maxio = HPTRR_DFLTPHYS;
824 		cpi->ccb_h.status = CAM_REQ_CMP;
825 		break;
826 	}
827 
828 	default:
829 		ccb->ccb_h.status = CAM_REQ_INVALID;
830 		break;
831 	}
832 
833 	xpt_done(ccb);
834 	return;
835 }
836 
837 static void hpt_pci_intr(void *arg)
838 {
839 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
840 	hpt_lock_vbus(vbus_ext);
841 	ldm_intr((PVBUS)vbus_ext->vbus);
842 	hpt_unlock_vbus(vbus_ext);
843 }
844 
845 static void hpt_poll(struct cam_sim *sim)
846 {
847 	hpt_pci_intr(cam_sim_softc(sim));
848 }
849 
850 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
851 {
852 	KdPrint(("hpt_async"));
853 }
854 
855 static int hpt_shutdown(device_t dev)
856 {
857 	KdPrint(("hpt_shutdown(dev=%p)", dev));
858 	return 0;
859 }
860 
861 static int hpt_detach(device_t dev)
862 {
863 	/* we don't allow the driver to be unloaded. */
864 	return EBUSY;
865 }
866 
867 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
868 {
869 	arg->ioctl_cmnd = 0;
870 	wakeup(arg);
871 }
872 
873 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
874 {
875 	ioctl_args->result = -1;
876 	ioctl_args->done = hpt_ioctl_done;
877 	ioctl_args->ioctl_cmnd = (void *)1;
878 
879 	hpt_lock_vbus(vbus_ext);
880 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
881 
882 	while (ioctl_args->ioctl_cmnd) {
883 		if (hpt_sleep(vbus_ext, ioctl_args, 0, "hptctl", HPT_OSM_TIMEOUT)==0)
884 			break;
885 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
886 		__hpt_do_tasks(vbus_ext);
887 	}
888 
889 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
890 
891 	hpt_unlock_vbus(vbus_ext);
892 }
893 
894 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
895 {
896 	PVBUS vbus;
897 	PVBUS_EXT vbus_ext;
898 
899 	ldm_for_each_vbus(vbus, vbus_ext) {
900 		__hpt_do_ioctl(vbus_ext, ioctl_args);
901 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
902 			return;
903 	}
904 }
905 
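/*
 * Build an IOCTL_ARG on the stack, dispatch it through hpt_do_ioctl(), and
 * evaluate to its result code.
 */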
906 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
907 	IOCTL_ARG arg;\
908 	arg.dwIoControlCode = code;\
909 	arg.lpInBuffer = inbuf;\
910 	arg.lpOutBuffer = outbuf;\
911 	arg.nInBufferSize = insize;\
912 	arg.nOutBufferSize = outsize;\
913 	arg.lpBytesReturned = 0;\
914 	hpt_do_ioctl(&arg);\
915 	arg.result;\
916 })
917 
918 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
919 
920 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
921 {
922 	int i;
923 	HPT_U32 count = nMaxCount-1;
924 
925 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
926 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
927 		return -1;
928 
929 	nMaxCount = (int)pIds[0];
930 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
931 	return nMaxCount;
932 }
933 
934 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
935 {
936 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
937 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
938 }
939 
940 /* This does not logically belong in this file, but we want to use the ioctl interface. */
941 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
942 {
943 	LOGICAL_DEVICE_INFO_V3 devinfo;
944 	int i, result;
945 	DEVICEID param[2] = { id, 0 };
946 
947 	if (hpt_get_device_info_v3(id, &devinfo))
948 		return -1;
949 
950 	if (devinfo.Type!=LDT_ARRAY)
951 		return -1;
952 
953 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
954 		param[1] = AS_REBUILD_ABORT;
955 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
956 		param[1] = AS_VERIFY_ABORT;
957 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
958 		param[1] = AS_INITIALIZE_ABORT;
959 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
960 		param[1] = AS_TRANSFORM_ABORT;
961 	else
962 		return -1;
963 
964 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
965 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
966 				param, sizeof(param), 0, 0);
967 
968 	for (i=0; i<devinfo.u.array.nDisk; i++)
969 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
970 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
971 
972 	return result;
973 }
974 
975 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
976 {
977 	DEVICEID ids[32];
978 	int i, count;
979 
980 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
981 
982 	for (i=0; i<count; i++)
983 		__hpt_stop_tasks(vbus_ext, ids[i]);
984 }
985 
986 static	d_open_t	hpt_open;
987 static	d_close_t	hpt_close;
988 static	d_ioctl_t	hpt_ioctl;
989 static  int 		hpt_rescan_bus(void);
990 static  void 		hpt_rescan_callback(struct cam_periph *periph, union ccb *ccb);
991 
992 static struct dev_ops hpt_ops = {
993 	{ driver_name, 0, 0 },
994 	.d_open =	hpt_open,
995 	.d_close =	hpt_close,
996 	.d_ioctl =	hpt_ioctl,
997 };
998 
999 static struct intr_config_hook hpt_ich;
1000 
1001 /*
1002  * hpt_final_init() is called after all hpt_attach() calls have completed.
1003  */
1004 static void hpt_final_init(void *dummy)
1005 {
1006 	int       i;
1007 	PVBUS_EXT vbus_ext;
1008 	PVBUS vbus;
1009 	PHBA hba;
1010 
1011 	/* Clear the config hook */
1012 	config_intrhook_disestablish(&hpt_ich);
1013 
1014 	/* allocate memory */
1015 	i = 0;
1016 	ldm_for_each_vbus(vbus, vbus_ext) {
1017 		if (hpt_alloc_mem(vbus_ext)) {
1018 			os_printk("out of memory");
1019 			return;
1020 		}
1021 		i++;
1022 	}
1023 
1024 	if (!i) {
1025 		if (bootverbose)
1026 			os_printk("no controller detected.");
1027 		return;
1028 	}
1029 
1030 	/* initialize the hardware */
1031 	ldm_for_each_vbus(vbus, vbus_ext) {
1032 		/* make timer available here */
1033 		callout_init(&vbus_ext->timer);
1034 		if (hpt_init_vbus(vbus_ext)) {
1035 			os_printk("failed to initialize hardware");
1036 			break; /* FIXME */
1037 		}
1038 	}
1039 
1040 	/* register CAM interface */
1041 	ldm_for_each_vbus(vbus, vbus_ext) {
1042 		struct cam_devq *devq;
1043 		struct ccb_setasync	ccb;
1044 
1045 		lockinit(&vbus_ext->lock, "hptsleeplock", 0, LK_CANRECURSE);
1046 		if (bus_dma_tag_create(NULL,/* parent */
1047 				4,	/* alignment */
1048 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1049 				BUS_SPACE_MAXADDR,	/* lowaddr */
1050 				BUS_SPACE_MAXADDR, 	/* highaddr */
1051 				NULL, NULL, 		/* filter, filterarg */
1052 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1053 				os_max_sg_descriptors,	/* nsegments */
1054 				0x10000,	/* maxsegsize */
1055 				BUS_DMA_WAITOK,		/* flags */
1056 				&vbus_ext->io_dmat	/* tag */))
1057 		{
1058 			return ;
1059 		}
1060 
1061 		for (i=0; i<os_max_queue_comm; i++) {
1062 			POS_CMDEXT ext = (POS_CMDEXT)kmalloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1063 			if (!ext) {
1064 				os_printk("Can't alloc cmdext(%d)", i);
1065 				return ;
1066 			}
1067 			ext->vbus_ext = vbus_ext;
1068 			ext->next = vbus_ext->cmdext_list;
1069 			vbus_ext->cmdext_list = ext;
1070 
1071 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1072 				os_printk("Can't create dma map(%d)", i);
1073 				return ;
1074 			}
1075 		}
1076 
1077 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1078 			os_printk("cam_simq_alloc failed");
1079 			return ;
1080 		}
1081 
1082 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1083 				vbus_ext, 0, &sim_mplock, os_max_queue_comm, /*tagged*/8,  devq);
1084 
1085 		if (!vbus_ext->sim) {
1086 			os_printk("cam_sim_alloc failed");
1087 			cam_simq_release(devq);
1088 			return ;
1089 		}
1090 		cam_simq_release(devq);
1091 
1092 		if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) {
1093 			os_printk("xpt_bus_register failed");
1094 			cam_sim_free(vbus_ext->sim);
1095 			vbus_ext->sim = NULL;
1096 			return ;
1097 		}
1098 
1099 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1100 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1101 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1102 		{
1103 			os_printk("xpt_create_path failed");
1104 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1105 			cam_sim_free(vbus_ext->sim);
1106 			vbus_ext->sim = NULL;
1107 			return ;
1108 		}
1109 
1110 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1111 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1112 		ccb.event_enable = AC_LOST_DEVICE;
1113 		ccb.callback = hpt_async;
1114 		ccb.callback_arg = vbus_ext;
1115 		xpt_action((union ccb *)&ccb);
1116 
1117 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1118 			int rid = 0;
1119 			if ((hba->irq_res = bus_alloc_resource(hba->pcidev,
1120 				SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1121 			{
1122 				os_printk("can't allocate interrupt");
1123 				return ;
1124 			}
1125 
1126 			if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
1127 				hpt_pci_intr, vbus_ext, &hba->irq_handle, NULL))
1128 			{
1129 				os_printk("can't set up interrupt");
1130 				return ;
1131 			}
1132 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1133 		}
1134 
1135 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1136 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1137 		if (!vbus_ext->shutdown_eh)
1138 			os_printk("Shutdown event registration failed");
1139 	}
1140 
1141 	ldm_for_each_vbus(vbus, vbus_ext) {
1142 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1143 		if (vbus_ext->tasks)
1144 			TASK_ENQUEUE(&vbus_ext->worker);
1145 	}
1146 
1147 	make_dev(&hpt_ops, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1148 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1149 }
1150 
1151 #if defined(KLD_MODULE)
1152 
1153 typedef struct driverlink *driverlink_t;
1154 struct driverlink {
1155 	kobj_class_t	driver;
1156 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1157 };
1158 
1159 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1160 
1161 struct devclass {
1162 	TAILQ_ENTRY(devclass) link;
1163 	devclass_t	parent;		/* parent in devclass hierarchy */
1164 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1165 	char		*name;
1166 	device_t	*devices;	/* array of devices indexed by unit */
1167 	int		maxunit;	/* size of devices array */
1168 };
1169 
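/*
 * For module builds, move this driver's entry to the front of the "pci"
 * devclass driver list so it is considered first when devices are probed.
 */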
1170 static void override_kernel_driver(void)
1171 {
1172 	driverlink_t dl, dlfirst;
1173 	driver_t *tmpdriver;
1174 	devclass_t dc = devclass_find("pci");
1175 
1176 	if (dc){
1177 		dlfirst = TAILQ_FIRST(&dc->drivers);
1178 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1179 			if(strcmp(dl->driver->name, driver_name) == 0) {
1180 				tmpdriver=dl->driver;
1181 				dl->driver=dlfirst->driver;
1182 				dlfirst->driver=tmpdriver;
1183 				break;
1184 			}
1185 		}
1186 	}
1187 }
1188 
1189 #else
1190 #define override_kernel_driver()
1191 #endif
1192 
1193 static void hpt_init(void *dummy)
1194 {
1195 	if (bootverbose)
1196 		os_printk("%s %s", driver_name_long, driver_ver);
1197 
1198 	override_kernel_driver();
1199 	init_config();
1200 
1201 	hpt_ich.ich_func = hpt_final_init;
1202 	hpt_ich.ich_arg = NULL;
1203 	if (config_intrhook_establish(&hpt_ich) != 0) {
1204 		kprintf("%s: cannot establish configuration hook\n",
1205 		    driver_name_long);
1206 	}
1207 
1208 }
1209 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1210 
1211 /*
1212  * CAM driver interface
1213  */
1214 static device_method_t driver_methods[] = {
1215 	/* Device interface */
1216 	DEVMETHOD(device_probe,		hpt_probe),
1217 	DEVMETHOD(device_attach,	hpt_attach),
1218 	DEVMETHOD(device_detach,	hpt_detach),
1219 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1220 	{ 0, 0 }
1221 };
1222 
1223 static driver_t hpt_pci_driver = {
1224 	driver_name,
1225 	driver_methods,
1226 	sizeof(HBA)
1227 };
1228 
1229 static devclass_t	hpt_devclass;
1230 
1231 #ifndef TARGETNAME
1232 #error "no TARGETNAME found"
1233 #endif
1234 
1235 /* use these wrappers so that TARGETNAME is macro-expanded */
1236 #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6)
1237 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1238 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1239 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0);
1240 __MODULE_VERSION(TARGETNAME, 1);
1241 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1242 
1243 typedef struct cdev * ioctl_dev_t;
1244 
1245 typedef	struct thread *	ioctl_thread_t;
1246 
1247 static int hpt_open(struct dev_open_args *ap)
1248 {
1249 	return 0;
1250 }
1251 
1252 static int hpt_close(struct dev_close_args *ap)
1253 {
1254 	return 0;
1255 }
1256 
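/*
 * Character-device ioctl entry: copy the user's HPT_IOCTL_PARAM buffers in,
 * run the request through hpt_do_ioctl() under the MP lock, then copy the
 * results back out.
 */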
1257 static int hpt_ioctl(struct dev_ioctl_args *ap)
1258 {
1259 	u_long cmd = ap->a_cmd;
1260 	caddr_t data = ap->a_data;
1261 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1262 	IOCTL_ARG ioctl_args;
1263 	HPT_U32 bytesReturned;
1264 
1265 	switch (cmd){
1266 	case HPT_DO_IOCONTROL:
1267 	{
1268 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1269 			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
1270 				piop->dwIoControlCode,
1271 				piop->lpInBuffer,
1272 				piop->nInBufferSize,
1273 				piop->lpOutBuffer,
1274 				piop->nOutBufferSize));
1275 
1276 		memset(&ioctl_args, 0, sizeof(ioctl_args));
1277 
1278 		ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1279 		ioctl_args.nInBufferSize = piop->nInBufferSize;
1280 		ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1281 		ioctl_args.lpBytesReturned = &bytesReturned;
1282 
1283 		if (ioctl_args.nInBufferSize) {
1284 			ioctl_args.lpInBuffer = kmalloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1285 			if (!ioctl_args.lpInBuffer)
1286 				goto invalid;
1287 			if (copyin((void*)piop->lpInBuffer,
1288 					ioctl_args.lpInBuffer, piop->nInBufferSize))
1289 				goto invalid;
1290 		}
1291 
1292 		if (ioctl_args.nOutBufferSize) {
1293 			ioctl_args.lpOutBuffer = kmalloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK);
1294 			if (!ioctl_args.lpOutBuffer)
1295 				goto invalid;
1296 		}
1297 
1298 		get_mplock();
1299 
1300 		hpt_do_ioctl(&ioctl_args);
1301 
1302 		rel_mplock();
1303 
1304 		if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1305 			if (piop->nOutBufferSize) {
1306 				if (copyout(ioctl_args.lpOutBuffer,
1307 					(void*)piop->lpOutBuffer, piop->nOutBufferSize))
1308 					goto invalid;
1309 			}
1310 			if (piop->lpBytesReturned) {
1311 				if (copyout(&bytesReturned,
1312 					(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1313 					goto invalid;
1314 			}
1315 			if (ioctl_args.lpInBuffer) kfree(ioctl_args.lpInBuffer, M_DEVBUF);
1316 			if (ioctl_args.lpOutBuffer) kfree(ioctl_args.lpOutBuffer, M_DEVBUF);
1317 			return 0;
1318 		}
1319 invalid:
1320 		if (ioctl_args.lpInBuffer) kfree(ioctl_args.lpInBuffer, M_DEVBUF);
1321 		if (ioctl_args.lpOutBuffer) kfree(ioctl_args.lpOutBuffer, M_DEVBUF);
1322 		return EFAULT;
1323 	}
1324 	return EFAULT;
1325 	}
1326 
1327 	case HPT_SCAN_BUS:
1328 	{
1329 		return hpt_rescan_bus();
1330 	}
1331 	default:
1332 		KdPrint(("invalid command!"));
1333 		return EFAULT;
1334 	}
1335 
1336 }
1337 
1338 static void	hpt_rescan_callback(struct cam_periph *periph, union ccb *ccb)
1339 {
1340 	xpt_free_path(ccb->ccb_h.path);
1341 	xpt_free_ccb(ccb);
1342 }
1343 
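/* Kick off an asynchronous XPT_SCAN_BUS on every registered virtual bus. */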
1344 static int	hpt_rescan_bus(void)
1345 {
1346 	union ccb			*ccb;
1347 	PVBUS 				vbus;
1348 	PVBUS_EXT			vbus_ext;
1349 
1350 	get_mplock();
1351 
1352 	ldm_for_each_vbus(vbus, vbus_ext) {
1353 		if ((ccb = xpt_alloc_ccb()) == NULL)
1354 			return(ENOMEM);
1355 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1356 		    cam_sim_path(vbus_ext->sim),
1357 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1358 			xpt_free_ccb(ccb);
1359 			return(EIO);
1360 		}
1361 
1362 		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1363 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1364 		ccb->ccb_h.cbfcnp = hpt_rescan_callback;
1365 		ccb->crcn.flags = CAM_FLAG_NONE;
1366 		xpt_action(ccb); /* scan is now in progress */
1367 	}
1368 
1369 	return(0);
1370 }
1371