1 /*-
2  * Copyright (c) 2011 HighPoint Technologies, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/hpt27xx/osm_bsd.c,v 1.1 2011/12/28 23:26:58 delphij Exp $
27  */
28 
29 #include <dev/raid/hpt27xx/hpt27xx_config.h>
30 
31 #include <dev/raid/hpt27xx/os_bsd.h>
32 #include <dev/raid/hpt27xx/hptintf.h>
33 
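/*
 * Probe: walk the HIM (hardware interface module) list and match this
 * device's PCI vendor/device ID against each HIM's supported ID table.
 * On a match, set the device description and pre-initialize the softc.
 */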
34 static int hpt_probe(device_t dev)
35 {
36 	PCI_ID pci_id;
37 	HIM *him;
38 	int i;
39 	PHBA hba;
40 
41 	for (him = him_list; him; him = him->next) {
42 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
43 			if (him->get_controller_count)
44 				him->get_controller_count(&pci_id,0,0);
45 			if ((pci_get_vendor(dev) == pci_id.vid) &&
46 				(pci_get_device(dev) == pci_id.did)){
47 				KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
48 					pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
49 				));
50 				device_set_desc(dev, him->name);
51 				hba = (PHBA)device_get_softc(dev);
52 				memset(hba, 0, sizeof(HBA));
53 				hba->ext_type = EXT_TYPE_HBA;
54 				hba->ldm_adapter.him = him;
55 				return 0;
56 			}
57 		}
58 	}
59 
60 	return (ENXIO);
61 }
62 
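/*
 * Attach: enable bus mastering, allocate and create the HIM adapter
 * instance, register it with the LDM layer (creating a new virtual bus
 * if necessary) and link the HBA into its vbus_ext's hba_list.
 */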
63 static int hpt_attach(device_t dev)
64 {
65 	PHBA hba = (PHBA)device_get_softc(dev);
66 	HIM *him = hba->ldm_adapter.him;
67 	PCI_ID pci_id;
68 	HPT_UINT size;
69 	PVBUS vbus;
70 	PVBUS_EXT vbus_ext;
71 
72 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
73 
74 	pci_enable_busmaster(dev);
75 
76 	pci_id.vid = pci_get_vendor(dev);
77 	pci_id.did = pci_get_device(dev);
78 	pci_id.rev = pci_get_revid(dev);
79 	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);
80 
81 	size = him->get_adapter_size(&pci_id);
82 	hba->ldm_adapter.him_handle = kmalloc(size, M_DEVBUF, M_WAITOK);
83 	if (!hba->ldm_adapter.him_handle)
84 		return ENXIO;
85 
86 	hba->pcidev = dev;
87 	hba->pciaddr.tree = 0;
88 	hba->pciaddr.bus = pci_get_bus(dev);
89 	hba->pciaddr.device = pci_get_slot(dev);
90 	hba->pciaddr.function = pci_get_function(dev);
91 
92 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
93 		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
94 		return -1;
95 	}
96 
97 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
98 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
99 
100 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
101 		size = ldm_get_vbus_size();
102 		vbus_ext = kmalloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
103 		memset(vbus_ext, 0, sizeof(VBUS_EXT));
104 		vbus_ext->ext_type = EXT_TYPE_VBUS;
105 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
106 		ldm_register_adapter(&hba->ldm_adapter);
107 	}
108 
109 	ldm_for_each_vbus(vbus, vbus_ext) {
110 		if (hba->ldm_adapter.vbus==vbus) {
111 			hba->vbus_ext = vbus_ext;
112 			hba->next = vbus_ext->hba_list;
113 			vbus_ext->hba_list = hba;
114 			break;
115 		}
116 	}
117 	return 0;
118 }
119 
120 /*
 * It might be better to use bus_dmamem_alloc() to allocate DMA memory,
 * but there are currently some problems with that approach (alignment, etc.).
123  */
124 static __inline void *__get_free_pages(int order)
125 {
126 	/* don't use low memory - other devices may get starved */
127 	return contigmalloc(PAGE_SIZE<<order,
128 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
129 }
130 
131 static __inline void free_pages(void *p, int order)
132 {
133 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
134 }
135 
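/*
 * Pre-allocate all memory requested by the LDM layer: regular freelists
 * are filled with kmalloc'ed blocks, while DMA freelists and the DMA page
 * pool are filled with physically contiguous pages from __get_free_pages().
 */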
136 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
137 {
138 	PHBA hba;
139 	struct freelist *f;
140 	HPT_UINT i;
141 	void **p;
142 
143 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
144 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
145 
146 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
147 
148 	for (f=vbus_ext->freelist_head; f; f=f->next) {
149 		KdPrint(("%s: %d*%d=%d bytes",
150 			f->tag, f->count, f->size, f->count*f->size));
151 		for (i=0; i<f->count; i++) {
152 			p = (void **)kmalloc(f->size, M_DEVBUF, M_WAITOK);
153 			if (!p)	return (ENXIO);
154 			*p = f->head;
155 			f->head = p;
156 		}
157 	}
158 
159 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
160 		int order, size, j;
161 
162 		HPT_ASSERT((f->size & (f->alignment-1))==0);
163 
164 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
165 
166 		KdPrint(("%s: %d*%d=%d bytes, order %d",
167 			f->tag, f->count, f->size, f->count*f->size, order));
168 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
169 
170 		for (i=0; i<f->count;) {
171 			p = (void **)__get_free_pages(order);
172 			if (!p) return -1;
173 			for (j = size/f->size; j && i<f->count; i++,j--) {
174 				*p = f->head;
175 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
176 				f->head = p;
177 				p = (void **)((unsigned long)p + f->size);
178 			}
179 		}
180 	}
181 
182 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
183 
184 	for (i=0; i<os_max_cache_pages; i++) {
185 		p = (void **)__get_free_pages(0);
186 		if (!p) return -1;
187 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
188 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
189 	}
190 
191 	return 0;
192 }
193 
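/*
 * Release everything hpt_alloc_mem() allocated.  DMA freelist blocks that
 * share a page are returned to the page pool only once per page (hence the
 * page-alignment check below); the pooled pages are freed at the end.
 */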
194 static void hpt_free_mem(PVBUS_EXT vbus_ext)
195 {
196 	struct freelist *f;
197 	void *p;
198 	int i;
199 	BUS_ADDRESS bus;
200 
201 	for (f=vbus_ext->freelist_head; f; f=f->next) {
202 #if DBG
203 		if (f->count!=f->reserved_count) {
204 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
205 		}
206 #endif
207 		while ((p=freelist_get(f)))
208 			kfree(p, M_DEVBUF);
209 	}
210 
211 	for (i=0; i<os_max_cache_pages; i++) {
212 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
213 		HPT_ASSERT(p);
214 		free_pages(p, 0);
215 	}
216 
217 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
218 		int order, size;
219 #if DBG
220 		if (f->count!=f->reserved_count) {
221 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
222 		}
223 #endif
224 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
225 
226 		while ((p=freelist_get_dma(f, &bus))) {
227 			if (order)
228 				free_pages(p, order);
229 			else {
				/* can't free immediately since other blocks in this page may still be in the list */
231 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
232 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
233 			}
234 		}
235 	}
236 
237 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
238 		free_pages(p, 0);
239 }
240 
241 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
242 {
243 	PHBA hba;
244 
245 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
246 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
			KdPrint(("failed to initialize %p", hba));
248 			return -1;
249 		}
250 
251 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
252 	return 0;
253 }
254 
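/*
 * Completion routine for the flush command issued by hpt_flush_vdev().
 * If the array is being transformed, requeue the flush to the transform
 * target as well; otherwise mark the flush as done and wake the waiter.
 */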
255 static void hpt_flush_done(PCOMMAND pCmd)
256 {
257 	PVDEV vd = pCmd->target;
258 
259 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
260 		vd = vd->u.array.transform->target;
261 		HPT_ASSERT(vd);
262 		pCmd->target = vd;
263 		pCmd->Result = RETURN_PENDING;
264 		vdev_queue_cmd(pCmd);
265 		return;
266 	}
267 
268 	*(int *)pCmd->priv = 1;
269 	wakeup(pCmd);
270 }
271 
272 /*
273  * flush a vdev (without retry).
274  */
275 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
276 {
277 	PCOMMAND pCmd;
278 	int result = 0, done;
279 	HPT_UINT count;
280 
	KdPrint(("flushing dev %p", vd));
282 
283 	hpt_lock_vbus(vbus_ext);
284 
285 	if (mIsArray(vd->type) && vd->u.array.transform)
286 		count = MAX(vd->u.array.transform->source->cmds_per_request,
287 					vd->u.array.transform->target->cmds_per_request);
288 	else
289 		count = vd->cmds_per_request;
290 
291 	pCmd = ldm_alloc_cmds(vd->vbus, count);
292 
293 	if (!pCmd) {
294 		hpt_unlock_vbus(vbus_ext);
295 		return -1;
296 	}
297 
298 	pCmd->type = CMD_TYPE_FLUSH;
299 	pCmd->flags.hard_flush = 1;
300 	pCmd->target = vd;
301 	pCmd->done = hpt_flush_done;
302 	done = 0;
303 	pCmd->priv = &done;
304 
305 	ldm_queue_cmd(pCmd);
306 
307 	if (!done) {
308 		while (hpt_sleep(vbus_ext, pCmd, 0, "hptfls", HPT_OSM_TIMEOUT)) {
309 			ldm_reset_vbus(vd->vbus);
310 		}
311 	}
312 
313 	KdPrint(("flush result %d", pCmd->Result));
314 
315 	if (pCmd->Result!=RETURN_SUCCESS)
316 		result = -1;
317 
318 	ldm_free_cmds(pCmd);
319 
320 	hpt_unlock_vbus(vbus_ext);
321 
322 	return result;
323 }
324 
325 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
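/*
 * Shutdown handler for a virtual bus: stop background array tasks, flush
 * every target (retrying once on failure), shut down and release the LDM
 * vbus, tear down interrupts and free all per-bus resources.
 */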
326 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
327 {
328 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
329 	PHBA hba;
330 	int i;
331 
332 	KdPrint(("hpt_shutdown_vbus"));
333 
334 	/* stop all ctl tasks and disable the worker taskqueue */
335 	hpt_stop_tasks(vbus_ext);
336 	vbus_ext->worker.ta_context = NULL;
337 
338 	/* flush devices */
339 	for (i=0; i<osm_max_targets; i++) {
340 		PVDEV vd = ldm_find_target(vbus, i);
341 		if (vd) {
342 			/* retry once */
343 			if (hpt_flush_vdev(vbus_ext, vd))
344 				hpt_flush_vdev(vbus_ext, vd);
345 		}
346 	}
347 
348 	hpt_lock_vbus(vbus_ext);
349 	ldm_shutdown(vbus);
350 	hpt_unlock_vbus(vbus_ext);
351 
352 	ldm_release_vbus(vbus);
353 
354 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
355 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
356 
357 	hpt_free_mem(vbus_ext);
358 
359 	while ((hba=vbus_ext->hba_list)) {
360 		vbus_ext->hba_list = hba->next;
361 		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
362 	}
363 
364 	kfree(vbus_ext, M_DEVBUF);
365 	KdPrint(("hpt_shutdown_vbus done"));
366 }
367 
368 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
369 {
370 	OSM_TASK *tasks;
371 
372 	tasks = vbus_ext->tasks;
373 	vbus_ext->tasks = NULL;
374 
375 	while (tasks) {
376 		OSM_TASK *t = tasks;
377 		tasks = t->next;
378 		t->next = NULL;
379 		t->func(vbus_ext->vbus, t->data);
380 	}
381 }
382 
383 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
384 {
385 	if(vbus_ext){
386 		hpt_lock_vbus(vbus_ext);
387 		__hpt_do_tasks(vbus_ext);
388 		hpt_unlock_vbus(vbus_ext);
389 	}
390 }
391 
392 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
393 static void hpt_poll(struct cam_sim *sim);
394 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
395 static void hpt_pci_intr(void *arg);
396 
397 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
398 {
399 	POS_CMDEXT p = vbus_ext->cmdext_list;
400 	if (p)
401 		vbus_ext->cmdext_list = p->next;
402 	return p;
403 }
404 
405 static __inline void cmdext_put(POS_CMDEXT p)
406 {
407 	p->next = p->vbus_ext->cmdext_list;
408 	p->vbus_ext->cmdext_list = p;
409 }
410 
411 static void hpt_timeout(void *arg)
412 {
413 	PCOMMAND pCmd = (PCOMMAND)arg;
414 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
415 
416 	KdPrint(("pCmd %p timeout", pCmd));
417 
418 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
419 }
420 
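/*
 * Command completion callback: stop the timeout callout, translate the
 * LDM result code into a CAM status, sync and unload the DMA map, then
 * hand the CCB back to CAM.
 */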
421 static void os_cmddone(PCOMMAND pCmd)
422 {
423 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
424 	union ccb *ccb = ext->ccb;
425 
426 	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));
427 
428 	callout_stop(&ccb->ccb_h.timeout_ch);
429 
430 	switch(pCmd->Result) {
431 	case RETURN_SUCCESS:
432 		ccb->ccb_h.status = CAM_REQ_CMP;
433 		break;
434 	case RETURN_BAD_DEVICE:
435 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
436 		break;
437 	case RETURN_DEVICE_BUSY:
438 		ccb->ccb_h.status = CAM_BUSY;
439 		break;
440 	case RETURN_INVALID_REQUEST:
441 		ccb->ccb_h.status = CAM_REQ_INVALID;
442 		break;
443 	case RETURN_SELECTION_TIMEOUT:
444 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
445 		break;
446 	case RETURN_RETRY:
447 		ccb->ccb_h.status = CAM_BUSY;
448 		break;
449 	default:
450 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
451 		break;
452 	}
453 
454 	if (pCmd->flags.data_in) {
455 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
456 	}
457 	else if (pCmd->flags.data_out) {
458 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
459 	}
460 
461 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
462 
463 	cmdext_put(ext);
464 	ldm_free_cmds(pCmd);
465 	xpt_done(ccb);
466 }
467 
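/*
 * Build a logical (kernel virtual) scatter/gather list for a command from
 * the CCB's data buffer or SG list.  Physical SG lists are filled in by
 * hpt_io_dmamap_callback() or hpt_scsi_io() directly, so the non-logical
 * case should never be reached here.
 */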
468 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
469 {
470 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
471 	union ccb *ccb = ext->ccb;
472 	bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
473 	int idx;
474 
475 	if(logical)	{
476 		if (ccb->ccb_h.flags & CAM_DATA_PHYS)
477 			panic("physical address unsupported");
478 
479 		if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
480 			if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
481 				panic("physical address unsupported");
482 
483 			for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
484 				os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr);
485 				pSg[idx].size = sgList[idx].ds_len;
486 				pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
487 			}
488 		}
489 		else {
490 			os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
491 			pSg->size = ccb->csio.dxfer_len;
492 			pSg->eot = 1;
493 		}
494 		return TRUE;
495 	}
496 
	/* since we already provide a physical SG list, we should never be asked to build one here */
498 	HPT_ASSERT(0);
499 	return FALSE;
500 }
501 
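/*
 * busdma callback: fill the command's physical SG list from the segments
 * returned by bus_dmamap_load(), sync the map for the transfer direction,
 * arm the timeout and queue the command to the LDM layer.
 */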
502 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
503 {
504 	PCOMMAND pCmd = (PCOMMAND)arg;
505 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
506 	PSG psg = pCmd->psg;
507 	int idx;
508 
509 	HPT_ASSERT(pCmd->flags.physical_sg);
510 
511 	if (error || nsegs == 0)
512 		panic("busdma error");
513 
514 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
515 
516 	for (idx = 0; idx < nsegs; idx++, psg++) {
517 		psg->addr.bus = segs[idx].ds_addr;
518 		psg->size = segs[idx].ds_len;
519 		psg->eot = 0;
520 	}
521 	psg[-1].eot = 1;
522 
523 	if (pCmd->flags.data_in) {
524 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD);
525 	}
526 	else if (pCmd->flags.data_out) {
527 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE);
528 	}
529 
530 	callout_reset(&ext->ccb->ccb_h.timeout_ch, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
531 	ldm_queue_cmd(pCmd);
532 }
533 
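/*
 * Handle an XPT_SCSI_IO request.  INQUIRY, READ CAPACITY and a few other
 * commands are emulated in the driver; READ/WRITE/VERIFY commands are
 * translated into LDM commands and queued to the virtual device.
 */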
534 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
535 {
536 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
537 	PVDEV vd;
538 	PCOMMAND pCmd;
539 	POS_CMDEXT ext;
540 	HPT_U8 *cdb;
541 
542 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
543 		cdb = ccb->csio.cdb_io.cdb_ptr;
544 	else
545 		cdb = ccb->csio.cdb_io.cdb_bytes;
546 
	KdPrint(("hpt_scsi_io: ccb %p id %d lun %d cdb %x-%x-%x",
548 		ccb,
549 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
550 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
551 	));
552 
553 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
554 	if (ccb->ccb_h.target_lun != 0 ||
555 		ccb->ccb_h.target_id >= osm_max_targets ||
556 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
557 	{
558 		ccb->ccb_h.status = CAM_TID_INVALID;
559 		xpt_done(ccb);
560 		return;
561 	}
562 
563 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
564 
565 	if (!vd) {
566 		ccb->ccb_h.status = CAM_TID_INVALID;
567 		xpt_done(ccb);
568 		return;
569 	}
570 
571 	switch (cdb[0]) {
572 	case TEST_UNIT_READY:
573 	case START_STOP_UNIT:
574 	case SYNCHRONIZE_CACHE:
575 		ccb->ccb_h.status = CAM_REQ_CMP;
576 		break;
577 
578 	case INQUIRY:
579 		{
580 			PINQUIRYDATA inquiryData;
581 			memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
582 			inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
583 
584 			inquiryData->AdditionalLength = 31;
585 			inquiryData->CommandQueue = 1;
586 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
587 			memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);
588 
589 			if (vd->target_id / 10) {
590 				inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
591 				inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
592 			}
593 			else
594 				inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';
595 
596 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
597 
598 			ccb->ccb_h.status = CAM_REQ_CMP;
599 		}
600 		break;
601 
602 	case READ_CAPACITY:
603 	{
604 		HPT_U8 *rbuf = ccb->csio.data_ptr;
605 		HPT_U32 cap;
606 
607 		if (vd->capacity>0xfffffffful)
608 			cap = 0xfffffffful;
609 		else
610 			cap = vd->capacity - 1;
611 
612 		rbuf[0] = (HPT_U8)(cap>>24);
613 		rbuf[1] = (HPT_U8)(cap>>16);
614 		rbuf[2] = (HPT_U8)(cap>>8);
615 		rbuf[3] = (HPT_U8)cap;
616 		rbuf[4] = 0;
617 		rbuf[5] = 0;
618 		rbuf[6] = 2;
619 		rbuf[7] = 0;
620 
621 		ccb->ccb_h.status = CAM_REQ_CMP;
622 		break;
623 	}
624 
625 	case SERVICE_ACTION_IN:
626 	{
627 		HPT_U8 *rbuf = ccb->csio.data_ptr;
628 		HPT_U64	cap = vd->capacity - 1;
629 
630 		rbuf[0] = (HPT_U8)(cap>>56);
631 		rbuf[1] = (HPT_U8)(cap>>48);
632 		rbuf[2] = (HPT_U8)(cap>>40);
633 		rbuf[3] = (HPT_U8)(cap>>32);
634 		rbuf[4] = (HPT_U8)(cap>>24);
635 		rbuf[5] = (HPT_U8)(cap>>16);
636 		rbuf[6] = (HPT_U8)(cap>>8);
637 		rbuf[7] = (HPT_U8)cap;
638 		rbuf[8] = 0;
639 		rbuf[9] = 0;
640 		rbuf[10] = 2;
641 		rbuf[11] = 0;
642 
643 		ccb->ccb_h.status = CAM_REQ_CMP;
644 		break;
645 	}
646 
647 	case READ_6:
648 	case READ_10:
649 	case READ_16:
650 	case WRITE_6:
651 	case WRITE_10:
652 	case WRITE_16:
	case 0x13: /* VERIFY_6 */
	case 0x2f: /* VERIFY_10 */
655 	case 0x8f: /* VERIFY_16 */
656 	{
657 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
658 		if(!pCmd){
659 			KdPrint(("Failed to allocate command!"));
660 			ccb->ccb_h.status = CAM_BUSY;
661 			break;
662 		}
663 
664 		switch (cdb[0])	{
665 		case READ_6:
666 		case WRITE_6:
		case 0x13: /* VERIFY_6 */
668 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
669 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
670 			break;
671 		case READ_16:
672 		case WRITE_16:
673 		case 0x8f: /* VERIFY_16 */
674 		{
675 			HPT_U64 block =
676 				((HPT_U64)cdb[2]<<56) |
677 				((HPT_U64)cdb[3]<<48) |
678 				((HPT_U64)cdb[4]<<40) |
679 				((HPT_U64)cdb[5]<<32) |
680 				((HPT_U64)cdb[6]<<24) |
681 				((HPT_U64)cdb[7]<<16) |
682 				((HPT_U64)cdb[8]<<8) |
683 				((HPT_U64)cdb[9]);
684 			pCmd->uCmd.Ide.Lba = block;
685 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
686 			break;
687 		}
688 
689 		default:
690 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
691 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
692 			break;
693 		}
694 
695 		switch (cdb[0]) {
696 		case READ_6:
697 		case READ_10:
698 		case READ_16:
699 			pCmd->flags.data_in = 1;
700 			break;
701 		case WRITE_6:
702 		case WRITE_10:
703 		case WRITE_16:
704 			pCmd->flags.data_out = 1;
705 			break;
706 		}
707 		pCmd->priv = ext = cmdext_get(vbus_ext);
708 		HPT_ASSERT(ext);
709 		ext->ccb = ccb;
710 		pCmd->target = vd;
711 		pCmd->done = os_cmddone;
712 		pCmd->buildsgl = os_buildsgl;
713 
714 		pCmd->psg = ext->psg;
715 
716 		if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
717 			int idx;
718 			bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
719 
720 			if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
721 				pCmd->flags.physical_sg = 1;
722 
723 			for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
724 				pCmd->psg[idx].addr.bus = sgList[idx].ds_addr;
725 				pCmd->psg[idx].size = sgList[idx].ds_len;
726 				pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
727 			}
728 
729 			callout_reset(&ccb->ccb_h.timeout_ch, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
730 			ldm_queue_cmd(pCmd);
731 		}
732 		else {
733 			int error;
734 			pCmd->flags.physical_sg = 1;
735 			error = bus_dmamap_load(vbus_ext->io_dmat,
736 						ext->dma_map,
737 						ccb->csio.data_ptr, ccb->csio.dxfer_len,
738 						hpt_io_dmamap_callback, pCmd,
739 					BUS_DMA_WAITOK
740 					);
741 			KdPrint(("bus_dmamap_load return %d", error));
742 			if (error && error!=EINPROGRESS) {
743 				os_printk("bus_dmamap_load error %d", error);
744 				cmdext_put(ext);
745 				ldm_free_cmds(pCmd);
746 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
747 				xpt_done(ccb);
748 			}
749 		}
750 		return;
751 	}
752 
753 	default:
754 		ccb->ccb_h.status = CAM_REQ_INVALID;
755 		break;
756 	}
757 
758 	xpt_done(ccb);
759 	return;
760 }
761 
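/*
 * CAM action entry point for the SIM.  SCSI I/O and bus resets are handled
 * under the vbus lock; transfer settings, geometry and path inquiry
 * requests are answered inline.
 */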
762 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
763 {
764 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
765 
766 	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
767 
768 	switch (ccb->ccb_h.func_code) {
769 
770 	case XPT_SCSI_IO:
771 		hpt_lock_vbus(vbus_ext);
772 		hpt_scsi_io(vbus_ext, ccb);
773 		hpt_unlock_vbus(vbus_ext);
774 		return;
775 
776 	case XPT_RESET_BUS:
777 		hpt_lock_vbus(vbus_ext);
778 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
779 		hpt_unlock_vbus(vbus_ext);
780 		break;
781 
782 	case XPT_GET_TRAN_SETTINGS:
783 	case XPT_SET_TRAN_SETTINGS:
784 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
785 		break;
786 
787 	case XPT_CALC_GEOMETRY:
788 		ccb->ccg.heads = 255;
789 		ccb->ccg.secs_per_track = 63;
790 		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
791 		ccb->ccb_h.status = CAM_REQ_CMP;
792 		break;
793 
794 	case XPT_PATH_INQ:
795 	{
796 		struct ccb_pathinq *cpi = &ccb->cpi;
797 
798 		cpi->version_num = 1;
799 		cpi->hba_inquiry = PI_SDTR_ABLE;
800 		cpi->target_sprt = 0;
801 		cpi->hba_misc = PIM_NOBUSRESET;
802 		cpi->hba_eng_cnt = 0;
803 		cpi->max_target = osm_max_targets;
804 		cpi->max_lun = 0;
805 		cpi->unit_number = cam_sim_unit(sim);
806 		cpi->bus_id = cam_sim_bus(sim);
807 		cpi->initiator_id = osm_max_targets;
808 		cpi->base_transfer_speed = 3300;
809 
810 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
811 		strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
812 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
813 		cpi->transport = XPORT_SPI;
814 		cpi->transport_version = 2;
815 		cpi->protocol = PROTO_SCSI;
816 		cpi->protocol_version = SCSI_REV_2;
817 		cpi->maxio = HPT27XX_DFLTPHYS;
818 		cpi->ccb_h.status = CAM_REQ_CMP;
819 		break;
820 	}
821 
822 	default:
823 		ccb->ccb_h.status = CAM_REQ_INVALID;
824 		break;
825 	}
826 
827 	xpt_done(ccb);
828 	return;
829 }
830 
831 static void hpt_pci_intr(void *arg)
832 {
833 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
834 	hpt_lock_vbus(vbus_ext);
835 	ldm_intr((PVBUS)vbus_ext->vbus);
836 	hpt_unlock_vbus(vbus_ext);
837 }
838 
839 static void hpt_poll(struct cam_sim *sim)
840 {
841 	hpt_pci_intr(cam_sim_softc(sim));
842 }
843 
844 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
845 {
846 	KdPrint(("hpt_async"));
847 }
848 
849 static int hpt_shutdown(device_t dev)
850 {
851 	KdPrint(("hpt_shutdown(dev=%p)", dev));
852 	return 0;
853 }
854 
855 static int hpt_detach(device_t dev)
856 {
857 	/* we don't allow the driver to be unloaded. */
858 	return EBUSY;
859 }
860 
861 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
862 {
863 	arg->ioctl_cmnd = NULL;
864 	wakeup(arg);
865 }
866 
867 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
868 {
869 	ioctl_args->result = -1;
870 	ioctl_args->done = hpt_ioctl_done;
871 	ioctl_args->ioctl_cmnd = (void *)1;
872 
873 	hpt_lock_vbus(vbus_ext);
874 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
875 
876 	while (ioctl_args->ioctl_cmnd) {
877 		if (hpt_sleep(vbus_ext, ioctl_args, 0, "hptctl", HPT_OSM_TIMEOUT)==0)
878 			break;
879 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
880 		__hpt_do_tasks(vbus_ext);
881 	}
882 
883 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
884 
885 	hpt_unlock_vbus(vbus_ext);
886 }
887 
888 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
889 {
890 	PVBUS vbus;
891 	PVBUS_EXT vbus_ext;
892 
893 	ldm_for_each_vbus(vbus, vbus_ext) {
894 		__hpt_do_ioctl(vbus_ext, ioctl_args);
895 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
896 			return;
897 	}
898 }
899 
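/*
 * Convenience wrapper around hpt_do_ioctl(): builds an IOCTL_ARG on the
 * stack and evaluates to its result field (a GCC statement expression).
 */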
900 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
901 	IOCTL_ARG arg;\
902 	arg.dwIoControlCode = code;\
903 	arg.lpInBuffer = inbuf;\
904 	arg.lpOutBuffer = outbuf;\
905 	arg.nInBufferSize = insize;\
906 	arg.nOutBufferSize = outsize;\
907 	arg.lpBytesReturned = NULL;\
908 	hpt_do_ioctl(&arg);\
909 	arg.result;\
910 })
911 
912 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
913 
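/*
 * Fetch the list of logical device IDs.  The ioctl returns the count in
 * pIds[0] followed by the IDs themselves, so shift the array down by one
 * and return the count.
 */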
914 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
915 {
916 	int i;
917 	HPT_U32 count = nMaxCount-1;
918 
919 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
920 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
921 		return -1;
922 
923 	nMaxCount = (int)pIds[0];
924 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
925 	return nMaxCount;
926 }
927 
928 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
929 {
930 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
931 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
932 }
933 
/*
 * This does not logically belong in this file, but we want to reuse the
 * ioctl interface.  Recursively abort any rebuild/verify/initialize/transform
 * task running on the given array and its member arrays.
 */
935 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
936 {
937 	LOGICAL_DEVICE_INFO_V3 devinfo;
938 	int i, result;
939 	DEVICEID param[2] = { id, 0 };
940 
941 	if (hpt_get_device_info_v3(id, &devinfo))
942 		return -1;
943 
944 	if (devinfo.Type!=LDT_ARRAY)
945 		return -1;
946 
947 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
948 		param[1] = AS_REBUILD_ABORT;
949 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
950 		param[1] = AS_VERIFY_ABORT;
951 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
952 		param[1] = AS_INITIALIZE_ABORT;
953 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
954 		param[1] = AS_TRANSFORM_ABORT;
955 	else
956 		return -1;
957 
958 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
959 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
960 				param, sizeof(param), 0, 0);
961 
962 	for (i=0; i<devinfo.u.array.nDisk; i++)
963 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
964 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
965 
966 	return result;
967 }
968 
969 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
970 {
971 	DEVICEID ids[32];
972 	int i, count;
973 
974 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
975 
976 	for (i=0; i<count; i++)
977 		__hpt_stop_tasks(vbus_ext, ids[i]);
978 }
979 
980 static	d_open_t	hpt_open;
981 static	d_close_t	hpt_close;
982 static	d_ioctl_t	hpt_ioctl;
983 static	void		hpt_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
984 static  int 		hpt_rescan_bus(void);
985 
986 static struct dev_ops hpt_ops = {
987 	{ driver_name, 0, 0 },
988 	.d_open =	hpt_open,
989 	.d_close =	hpt_close,
990 	.d_ioctl =	hpt_ioctl,
991 };
992 
993 static struct intr_config_hook hpt_ich;
994 
/*
 * hpt_final_init is called (via the interrupt config hook) after all
 * hpt_attach calls have completed.
 */
998 static void hpt_final_init(void *dummy)
999 {
1000 	int       i;
1001 	PVBUS_EXT vbus_ext;
1002 	PVBUS vbus;
1003 	PHBA hba;
1004 
1005 	/* Clear the config hook */
1006 	config_intrhook_disestablish(&hpt_ich);
1007 
1008 	/* allocate memory */
1009 	i = 0;
1010 	ldm_for_each_vbus(vbus, vbus_ext) {
1011 		if (hpt_alloc_mem(vbus_ext)) {
1012 			os_printk("out of memory");
1013 			return;
1014 		}
1015 		i++;
1016 	}
1017 
1018 	if (!i) {
		os_printk("no controller detected.");
1020 		return;
1021 	}
1022 
	/* initialize hardware */
1024 	ldm_for_each_vbus(vbus, vbus_ext) {
1025 		/* make timer available here */
1026 		callout_init(&vbus_ext->timer);
1027 		if (hpt_init_vbus(vbus_ext)) {
			os_printk("failed to initialize hardware");
1029 			break; /* FIXME */
1030 		}
1031 	}
1032 
1033 	/* register CAM interface */
1034 	ldm_for_each_vbus(vbus, vbus_ext) {
1035 		struct cam_devq *devq;
1036 		struct ccb_setasync	ccb;
1037 
1038 		lockinit(&vbus_ext->lock, "hptsleeplock", 0, LK_CANRECURSE);
1039 		if (bus_dma_tag_create(NULL,/* parent */
1040 				4,	/* alignment */
1041 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1042 				BUS_SPACE_MAXADDR,	/* lowaddr */
1043 				BUS_SPACE_MAXADDR, 	/* highaddr */
1044 				NULL, NULL, 		/* filter, filterarg */
1045 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1046 				os_max_sg_descriptors,	/* nsegments */
1047 				0x10000,	/* maxsegsize */
1048 				BUS_DMA_WAITOK,		/* flags */
1049 				&vbus_ext->io_dmat	/* tag */))
1050 		{
1051 			return ;
1052 		}
1053 
1054 		for (i=0; i<os_max_queue_comm; i++) {
1055 			POS_CMDEXT ext = (POS_CMDEXT)kmalloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1056 			if (!ext) {
1057 				os_printk("Can't alloc cmdext(%d)", i);
1058 				return ;
1059 			}
1060 			ext->vbus_ext = vbus_ext;
1061 			ext->next = vbus_ext->cmdext_list;
1062 			vbus_ext->cmdext_list = ext;
1063 
1064 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1065 				os_printk("Can't create dma map(%d)", i);
1066 				return ;
1067 			}
1068 		}
1069 
1070 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1071 			os_printk("cam_simq_alloc failed");
1072 			return ;
1073 		}
1074 
1075 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1076 				vbus_ext, 0, &sim_mplock, os_max_queue_comm, /*tagged*/8,  devq);
1077 		cam_simq_release(devq);
1078 
1079 		if (!vbus_ext->sim) {
1080 			os_printk("cam_sim_alloc failed");
1081 			return ;
1082 		}
1083 
1084 		if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) {
1085 			os_printk("xpt_bus_register failed");
1086 			cam_sim_free(vbus_ext->sim);
1087 			vbus_ext->sim = NULL;
1088 			return ;
1089 		}
1090 
1091 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1092 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1093 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1094 		{
1095 			os_printk("xpt_create_path failed");
1096 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1097 			cam_sim_free(vbus_ext->sim);
1098 			vbus_ext->sim = NULL;
1099 			return ;
1100 		}
1101 
1102 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1103 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1104 		ccb.event_enable = AC_LOST_DEVICE;
1105 		ccb.callback = hpt_async;
1106 		ccb.callback_arg = vbus_ext;
1107 		xpt_action((union ccb *)&ccb);
1108 
1109 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1110 			int rid = 0;
1111 			if ((hba->irq_res = bus_alloc_resource(hba->pcidev,
1112 				SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1113 			{
1114 				os_printk("can't allocate interrupt");
1115 				return ;
1116 			}
1117 
1118 			if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
1119 				hpt_pci_intr, vbus_ext, &hba->irq_handle, NULL))
1120 			{
1121 				os_printk("can't set up interrupt");
1122 				return ;
1123 			}
1124 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1125 
1126 		}
1127 
1128 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1129 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1130 		if (!vbus_ext->shutdown_eh)
1131 			os_printk("Shutdown event registration failed");
1132 	}
1133 
1134 	ldm_for_each_vbus(vbus, vbus_ext) {
1135 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1136 		if (vbus_ext->tasks)
1137 			TASK_ENQUEUE(&vbus_ext->worker);
1138 	}
1139 
1140 	make_dev(&hpt_ops, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1141 	    S_IRUSR | S_IWUSR, driver_name);
1142 }
1143 
1144 #if defined(KLD_MODULE) && (__FreeBSD_version >= 503000)
1145 
1146 typedef struct driverlink *driverlink_t;
1147 struct driverlink {
1148 	kobj_class_t	driver;
1149 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1150 };
1151 
1152 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1153 
1154 struct devclass {
1155 	TAILQ_ENTRY(devclass) link;
1156 	devclass_t	parent;		/* parent in devclass hierarchy */
1157 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1158 	char		*name;
1159 	device_t	*devices;	/* array of devices indexed by unit */
1160 	int		maxunit;	/* size of devices array */
1161 };
1162 
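/*
 * Move this driver to the front of the PCI devclass's driver list by
 * swapping it with the first entry, presumably so the module is probed
 * ahead of a statically compiled-in driver with the same name.
 */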
1163 static void override_kernel_driver(void)
1164 {
1165 	driverlink_t dl, dlfirst;
1166 	driver_t *tmpdriver;
1167 	devclass_t dc = devclass_find("pci");
1168 
1169 	if (dc){
1170 		dlfirst = TAILQ_FIRST(&dc->drivers);
1171 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1172 			if(strcmp(dl->driver->name, driver_name) == 0) {
1173 				tmpdriver=dl->driver;
1174 				dl->driver=dlfirst->driver;
1175 				dlfirst->driver=tmpdriver;
1176 				break;
1177 			}
1178 		}
1179 	}
1180 }
1181 
1182 #else
1183 #define override_kernel_driver()
1184 #endif
1185 
1186 static void hpt_init(void *dummy)
1187 {
1188 	if (bootverbose)
1189 		os_printk("%s %s", driver_name_long, driver_ver);
1190 
1191 	override_kernel_driver();
1192 	init_config();
1193 
1194 	hpt_ich.ich_func = hpt_final_init;
1195 	hpt_ich.ich_arg = NULL;
1196 	hpt_ich.ich_desc = "hpt27xx";
1197 	if (config_intrhook_establish(&hpt_ich) != 0) {
1198 		kprintf("%s: cannot establish configuration hook\n",
1199 		    driver_name_long);
1200 	}
1201 
1202 }
1203 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1204 
1205 /*
1206  * CAM driver interface
1207  */
1208 static device_method_t driver_methods[] = {
1209 	/* Device interface */
1210 	DEVMETHOD(device_probe,		hpt_probe),
1211 	DEVMETHOD(device_attach,	hpt_attach),
1212 	DEVMETHOD(device_detach,	hpt_detach),
1213 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1214 	DEVMETHOD_END
1215 };
1216 
1217 static driver_t hpt_pci_driver = {
1218 	driver_name,
1219 	driver_methods,
1220 	sizeof(HBA)
1221 };
1222 
1223 static devclass_t	hpt_devclass;
1224 
1225 #ifndef TARGETNAME
1226 #error "no TARGETNAME found"
1227 #endif
1228 
/* use these wrappers so that TARGETNAME gets macro-expanded */
1230 #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6)
1231 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1232 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1233 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, NULL, NULL);
1234 __MODULE_VERSION(TARGETNAME, 1);
1235 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1236 
1237 typedef struct cdev * ioctl_dev_t;
1238 
1239 typedef	struct thread *	ioctl_thread_t;
1240 
1241 static int hpt_open(struct dev_open_args *ap)
1242 {
1243 	return 0;
1244 }
1245 
1246 static int hpt_close(struct dev_close_args *ap)
1247 {
1248 	return 0;
1249 }
1250 
1251 static int hpt_ioctl(struct dev_ioctl_args *ap)
1252 {
1253 	u_long cmd = ap->a_cmd;
1254 	caddr_t data = ap->a_data;
1255 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1256 	IOCTL_ARG ioctl_args;
1257 	HPT_U32 bytesReturned;
1258 
1259 	switch (cmd){
1260 	case HPT_DO_IOCONTROL:
1261 	{
1262 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1263 			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
1264 				piop->dwIoControlCode,
1265 				piop->lpInBuffer,
1266 				piop->nInBufferSize,
1267 				piop->lpOutBuffer,
1268 				piop->nOutBufferSize));
1269 
			memset(&ioctl_args, 0, sizeof(ioctl_args));

			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
			ioctl_args.nInBufferSize = piop->nInBufferSize;
			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
			ioctl_args.lpBytesReturned = &bytesReturned;

			if (ioctl_args.nInBufferSize) {
				ioctl_args.lpInBuffer = kmalloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
				if (!ioctl_args.lpInBuffer)
					goto invalid;
				if (copyin((void*)piop->lpInBuffer,
						ioctl_args.lpInBuffer, piop->nInBufferSize))
					goto invalid;
			}

			if (ioctl_args.nOutBufferSize) {
				ioctl_args.lpOutBuffer = kmalloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK);
				if (!ioctl_args.lpOutBuffer)
					goto invalid;
			}

			get_mplock();

			hpt_do_ioctl(&ioctl_args);

			rel_mplock();

			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
				if (piop->nOutBufferSize) {
					if (copyout(ioctl_args.lpOutBuffer,
							(void*)piop->lpOutBuffer, piop->nOutBufferSize))
						goto invalid;
				}
				if (piop->lpBytesReturned) {
					if (copyout(&bytesReturned,
							(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
						goto invalid;
				}
				if (ioctl_args.lpInBuffer) kfree(ioctl_args.lpInBuffer, M_DEVBUF);
				if (ioctl_args.lpOutBuffer) kfree(ioctl_args.lpOutBuffer, M_DEVBUF);
				return 0;
			}
invalid:
			if (ioctl_args.lpInBuffer) kfree(ioctl_args.lpInBuffer, M_DEVBUF);
			if (ioctl_args.lpOutBuffer) kfree(ioctl_args.lpOutBuffer, M_DEVBUF);
			return EFAULT;
		}
		return EFAULT;
1319 	}
1320 
1321 	case HPT_SCAN_BUS:
1322 	{
1323 		return hpt_rescan_bus();
1324 	}
1325 	default:
1326 		KdPrint(("invalid command!"));
1327 		return EFAULT;
1328 	}
1329 
1330 }
1331 
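/*
 * Rescan all registered virtual buses: issue one XPT_SCAN_BUS CCB per bus
 * with hpt_bus_scan_cb() as the completion callback, which frees the path
 * and the CCB when the scan finishes.
 */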
1332 static int	hpt_rescan_bus(void)
1333 {
1334 	struct cam_path		*path;
1335 	union ccb			*ccb;
1336 	PVBUS 				vbus;
1337 	PVBUS_EXT			vbus_ext;
1338 
1339 	get_mplock();
1340 
1341 	ldm_for_each_vbus(vbus, vbus_ext) {
1342 		if (xpt_create_path(&path, xpt_periph, cam_sim_path(vbus_ext->sim),
1343 			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1344 			rel_mplock();
1345 			return(EIO);
1346 		}
1347 		ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK);
1348 		bzero(ccb, sizeof(union ccb));
1349 		xpt_setup_ccb(&ccb->ccb_h, path, 5);
1350 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1351 		ccb->ccb_h.cbfcnp = hpt_bus_scan_cb;
1352 		ccb->crcn.flags = CAM_FLAG_NONE;
1353 		xpt_action(ccb);
1354 	}
1355 
1356 	rel_mplock();
1357 
1358 	return(0);
1359 }
1360 
1361 static	void	hpt_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
1362 {
1363 	if (ccb->ccb_h.status != CAM_REQ_CMP)
		KdPrint(("hpt_bus_scan_cb: failure status = %x", ccb->ccb_h.status));
1365 	else
		KdPrint(("Bus scan completed successfully"));
1367 
1368 	xpt_free_path(ccb->ccb_h.path);
1369 	kfree(ccb, M_TEMP);
1370 	return;
1371 }
1372