1 /* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */
2 /*-
3  * HighPoint RAID Driver for FreeBSD
4  * Copyright (C) 2005-2011 HighPoint Technologies, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 #include <dev/hptnr/hptnr_config.h>
31 #include <dev/hptnr/os_bsd.h>
32 #include <dev/hptnr/hptintf.h>
33 int msi = 0;
34 int debug_flag = 0;
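/*
 * Walk the global him_list and return the hardware interface module (HIM)
 * whose supported PCI vendor/device ID table matches this device, or NULL
 * if no registered HIM claims it.
 */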
35 static HIM *hpt_match(device_t dev)
36 {
37 	PCI_ID pci_id;
38 	HIM *him;
39 	int i;
40 
41 	for (him = him_list; him; him = him->next) {
42 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
43 			if (him->get_controller_count)
44 				him->get_controller_count(&pci_id,0,0);
45 			if ((pci_get_vendor(dev) == pci_id.vid) &&
46 				(pci_get_device(dev) == pci_id.did)){
47 				return (him);
48 			}
49 		}
50 	}
51 
52 	return (NULL);
53 }
54 
55 static int hpt_probe(device_t dev)
56 {
57 	HIM *him;
58 
59 	him = hpt_match(dev);
60 	if (him != NULL) {
61 		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
62 			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
63 			));
64 		device_set_desc(dev, him->name);
65 		return (BUS_PROBE_DEFAULT);
66 	}
67 
68 	return (ENXIO);
69 }
70 
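/*
 * Attach one controller: enable bus mastering, allocate and create the HIM
 * adapter instance, and register it with the LDM layer.  If the first
 * ldm_register_adapter() call fails, a new VBUS_EXT (with its embedded vbus)
 * is allocated, the vbus is created and the registration is retried.
 * Finally the HBA is linked onto its vbus_ext's hba_list.
 */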
71 static int hpt_attach(device_t dev)
72 {
73 	PHBA hba = (PHBA)device_get_softc(dev);
74 	HIM *him;
75 	PCI_ID pci_id;
76 	HPT_UINT size;
77 	PVBUS vbus;
78 	PVBUS_EXT vbus_ext;
79 
80 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
81 
82 	him = hpt_match(dev);
83 	hba->ext_type = EXT_TYPE_HBA;
84 	hba->ldm_adapter.him = him;
85 
86 	pci_enable_busmaster(dev);
87 
88 	pci_id.vid = pci_get_vendor(dev);
89 	pci_id.did = pci_get_device(dev);
90 	pci_id.rev = pci_get_revid(dev);
91 	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);
92 
93 	size = him->get_adapter_size(&pci_id);
94 	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
95 
96 	hba->pcidev = dev;
97 	hba->pciaddr.tree = 0;
98 	hba->pciaddr.bus = pci_get_bus(dev);
99 	hba->pciaddr.device = pci_get_slot(dev);
100 	hba->pciaddr.function = pci_get_function(dev);
101 
102 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
103 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
104 		return ENXIO;
105 	}
106 
107 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
108 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
109 
110 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
111 		size = ldm_get_vbus_size();
112 		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK |
113 			M_ZERO);
114 		vbus_ext->ext_type = EXT_TYPE_VBUS;
115 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
116 		ldm_register_adapter(&hba->ldm_adapter);
117 	}
118 
119 	ldm_for_each_vbus(vbus, vbus_ext) {
120 		if (hba->ldm_adapter.vbus==vbus) {
121 			hba->vbus_ext = vbus_ext;
122 			hba->next = vbus_ext->hba_list;
123 			vbus_ext->hba_list = hba;
124 			break;
125 		}
126 	}
127 	return 0;
128 }
129 
130 /*
131  * It might be better to use bus_dmamem_alloc() to allocate this DMA memory,
132  * but there are currently some problems with that (alignment, etc.).
133  */
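/*
 * These helpers hand out physically contiguous, page-aligned chunks of
 * PAGE_SIZE << order bytes via contigmalloc(), restricted to addresses at or
 * above BUS_SPACE_MAXADDR_24BIT so that legacy-DMA-capable low memory is not
 * consumed.
 */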
134 static __inline void *__get_free_pages(int order)
135 {
136 	/* don't use low memory - other devices may get starved */
137 	return contigmalloc(PAGE_SIZE<<order,
138 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
139 }
140 
141 static __inline void free_pages(void *p, int order)
142 {
143 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
144 }
145 
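/*
 * Pre-allocate everything the core layer asked for: each HBA reports its
 * memory requirements, the plain freelists are then filled with malloc()ed
 * blocks, and the DMA freelists are filled by carving contiguous page-order
 * allocations into f->size pieces (each piece stores its bus address right
 * behind the freelist link).  The remaining os_max_cache_pages pages go into
 * the per-vbus DMA page pool.
 */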
146 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
147 {
148 	PHBA hba;
149 	struct freelist *f;
150 	HPT_UINT i;
151 	void **p;
152 
153 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
154 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
155 
156 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
157 
158 	for (f=vbus_ext->freelist_head; f; f=f->next) {
159 		KdPrint(("%s: %d*%d=%d bytes",
160 			f->tag, f->count, f->size, f->count*f->size));
161 		for (i=0; i<f->count; i++) {
162 			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
163 			if (!p)	return (ENXIO);
164 			*p = f->head;
165 			f->head = p;
166 		}
167 	}
168 
169 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
170 		int order, size, j;
171 
172 		HPT_ASSERT((f->size & (f->alignment-1))==0);
173 
174 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
175 			;
176 
177 		KdPrint(("%s: %d*%d=%d bytes, order %d",
178 			f->tag, f->count, f->size, f->count*f->size, order));
179 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
180 
181 		for (i=0; i<f->count;) {
182 			p = (void **)__get_free_pages(order);
183 			if (!p) return -1;
184 			for (j = size/f->size; j && i<f->count; i++,j--) {
185 				*p = f->head;
186 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
187 				f->head = p;
188 				p = (void **)((unsigned long)p + f->size);
189 			}
190 		}
191 	}
192 
193 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
194 
195 	for (i=0; i<os_max_cache_pages; i++) {
196 		p = (void **)__get_free_pages(0);
197 		if (!p) return -1;
198 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
199 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
200 	}
201 
202 	return 0;
203 }
204 
205 static void hpt_free_mem(PVBUS_EXT vbus_ext)
206 {
207 	struct freelist *f;
208 	void *p;
209 	int i;
210 	BUS_ADDRESS bus;
211 
212 	for (f=vbus_ext->freelist_head; f; f=f->next) {
213 #if DBG
214 		if (f->count!=f->reserved_count) {
215 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
216 		}
217 #endif
218 		while ((p=freelist_get(f)))
219 			free(p, M_DEVBUF);
220 	}
221 
222 	for (i=0; i<os_max_cache_pages; i++) {
223 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
224 		HPT_ASSERT(p);
225 		free_pages(p, 0);
226 	}
227 
228 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
229 		int order, size;
230 #if DBG
231 		if (f->count!=f->reserved_count) {
232 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
233 		}
234 #endif
235 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
236 
237 		while ((p=freelist_get_dma(f, &bus))) {
238 			if (order)
239 				free_pages(p, order);
240 			else {
241 			/* can't free immediately since other blocks in this page may still be in the list */
242 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
243 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
244 			}
245 		}
246 	}
247 
248 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
249 		free_pages(p, 0);
250 }
251 
252 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
253 {
254 	PHBA hba;
255 
256 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
257 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
258 			KdPrint(("failed to initialize %p", hba));
259 			return -1;
260 		}
261 
262 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
263 	return 0;
264 }
265 
266 static void hpt_flush_done(PCOMMAND pCmd)
267 {
268 	PVDEV vd = pCmd->target;
269 
270 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
271 		vd = vd->u.array.transform->target;
272 		HPT_ASSERT(vd);
273 		pCmd->target = vd;
274 		pCmd->Result = RETURN_PENDING;
275 		vdev_queue_cmd(pCmd);
276 		return;
277 	}
278 
279 	*(int *)pCmd->priv = 1;
280 	wakeup(pCmd);
281 }
282 
283 /*
284  * flush a vdev (without retry).
285  */
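/*
 * A CMD_TYPE_FLUSH command with the hard_flush flag is queued to the device;
 * hpt_flush_done() sets the flag pointed to by pCmd->priv and wakes us.  If
 * the sleep times out before completion, the vbus is reset and we keep
 * waiting.  Arrays undergoing a transform are flushed on the transform
 * target as well (see hpt_flush_done()).
 */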
286 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
287 {
288 	PCOMMAND pCmd;
289 	int result = 0, done;
290 	HPT_UINT count;
291 
292 	KdPrint(("flushing dev %p", vd));
293 
294 	hpt_assert_vbus_locked(vbus_ext);
295 
296 	if (mIsArray(vd->type) && vd->u.array.transform)
297 		count = max(vd->u.array.transform->source->cmds_per_request,
298 					vd->u.array.transform->target->cmds_per_request);
299 	else
300 		count = vd->cmds_per_request;
301 
302 	pCmd = ldm_alloc_cmds(vd->vbus, count);
303 
304 	if (!pCmd) {
305 		return -1;
306 	}
307 
308 	pCmd->type = CMD_TYPE_FLUSH;
309 	pCmd->flags.hard_flush = 1;
310 	pCmd->target = vd;
311 	pCmd->done = hpt_flush_done;
312 	done = 0;
313 	pCmd->priv = &done;
314 
315 	ldm_queue_cmd(pCmd);
316 
317 	if (!done) {
318 		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
319 			ldm_reset_vbus(vd->vbus);
320 		}
321 	}
322 
323 	KdPrint(("flush result %d", pCmd->Result));
324 
325 	if (pCmd->Result!=RETURN_SUCCESS)
326 		result = -1;
327 
328 	ldm_free_cmds(pCmd);
329 
330 	return result;
331 }
332 
333 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
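/*
 * Shutdown path (registered below as a shutdown_final event handler): stop
 * any background array tasks, disable the worker task, flush every target
 * (retrying once), shut down and release the vbus, then tear down the
 * per-HBA interrupt handlers and free all driver memory.
 */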
334 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
335 {
336 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
337 	PHBA hba;
338 	int i;
339 
340 	KdPrint(("hpt_shutdown_vbus"));
341 
342 	/* stop all ctl tasks and disable the worker taskqueue */
343 	hpt_stop_tasks(vbus_ext);
344 	hpt_lock_vbus(vbus_ext);
345 	vbus_ext->worker.ta_context = 0;
346 
347 	/* flush devices */
348 	for (i=0; i<osm_max_targets; i++) {
349 		PVDEV vd = ldm_find_target(vbus, i);
350 		if (vd) {
351 			/* retry once */
352 			if (hpt_flush_vdev(vbus_ext, vd))
353 				hpt_flush_vdev(vbus_ext, vd);
354 		}
355 	}
356 
357 	ldm_shutdown(vbus);
358 	hpt_unlock_vbus(vbus_ext);
359 
360 	ldm_release_vbus(vbus);
361 
362 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
363 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
364 
365 	hpt_free_mem(vbus_ext);
366 
367 	while ((hba=vbus_ext->hba_list)) {
368 		vbus_ext->hba_list = hba->next;
369 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
370 	}
371 
372 	callout_drain(&vbus_ext->timer);
373 	mtx_destroy(&vbus_ext->lock);
374 	free(vbus_ext, M_DEVBUF);
375 	KdPrint(("hpt_shutdown_vbus done"));
376 }
377 
378 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
379 {
380 	OSM_TASK *tasks;
381 
382 	tasks = vbus_ext->tasks;
383 	vbus_ext->tasks = 0;
384 
385 	while (tasks) {
386 		OSM_TASK *t = tasks;
387 		tasks = t->next;
388 		t->next = 0;
389 		t->func(vbus_ext->vbus, t->data);
390 	}
391 }
392 
393 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
394 {
395 	if(vbus_ext){
396 		hpt_lock_vbus(vbus_ext);
397 		__hpt_do_tasks(vbus_ext);
398 		hpt_unlock_vbus(vbus_ext);
399 	}
400 }
401 
402 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
403 static void hpt_poll(struct cam_sim *sim);
404 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
405 static void hpt_pci_intr(void *arg);
406 
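/*
 * OS_CMDEXT objects tie a CAM ccb to an LDM command; each carries a
 * preallocated busdma map, a physical SG table and a timeout callout that
 * resets the vbus if a command hangs.  They are kept on a simple per-vbus
 * free list managed by cmdext_get()/cmdext_put().
 */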
407 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
408 {
409 	POS_CMDEXT p = vbus_ext->cmdext_list;
410 	if (p)
411 		vbus_ext->cmdext_list = p->next;
412 	return p;
413 }
414 
415 static __inline void cmdext_put(POS_CMDEXT p)
416 {
417 	p->next = p->vbus_ext->cmdext_list;
418 	p->vbus_ext->cmdext_list = p;
419 }
420 
421 static void hpt_timeout(void *arg)
422 {
423 	PCOMMAND pCmd = (PCOMMAND)arg;
424 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
425 
426 	KdPrint(("pCmd %p timeout", pCmd));
427 
428 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
429 }
430 
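/*
 * Command completion: cancel the timeout, translate the LDM result into a
 * CAM status, post-sync and unload the data map, then return the ccb to CAM.
 * For ATA pass-through CDBs (ATA_12/ATA_16) the returned task-file registers
 * are also packed into descriptor-format sense data (response code 0x72 with
 * an ATA Status Return descriptor, code 0x09).
 */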
431 static void os_cmddone(PCOMMAND pCmd)
432 {
433 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
434 	union ccb *ccb = ext->ccb;
435 	HPT_U8 *cdb;
436 
437 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
438 		cdb = ccb->csio.cdb_io.cdb_ptr;
439 	else
440 		cdb = ccb->csio.cdb_io.cdb_bytes;
441 
442 	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));
443 
444 	callout_stop(&ext->timeout);
445 	switch(cdb[0]) {
446 		case 0x85: /*ATA_16*/
447 		case 0xA1: /*ATA_12*/
448 		{
449 			PassthroughCmd *passthru = &pCmd->uCmd.Passthrough;
450 			HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data;
451 			memset(&ccb->csio.sense_data, 0,sizeof(ccb->csio.sense_data));
452 
453 			sense_buffer[0] = 0x72; /* Response Code */
454 			sense_buffer[7] = 14; /* Additional Sense Length */
455 
456 			sense_buffer[8] = 0x9; /* ATA Return Descriptor */
457 			sense_buffer[9] = 0xc; /* Additional Descriptor Length */
458 			sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */
459 			sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg;  /* Sector Count (7:0) */
460 			sense_buffer[15] = (HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */
461 			sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */
462 			sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */
463 
464 			if ((cdb[0] == 0x85) && (cdb[1] & 0x1))
465 			{
466 				sense_buffer[10] = 1;
467 				sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */
468 				sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8);	/* LBA Low (15:8) */
469 				sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */
470 				sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */
471 			}
472 
473 			sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */
474 			sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */
475 			KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x",
476 					 passthru->bCommandReg,
477 					 passthru->bFeaturesReg,
478 					 passthru->bLbaLowReg,
479 					 passthru->bLbaMidReg,
480 					 passthru->bLbaHighReg,
481 					 passthru->bDriveHeadReg,
482 					 passthru->bSectorCountReg));
483 			KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ",
484 				pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg,
485 				passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg));
486 		}
487 		default:
488 			break;
489 	}
490 
491 	switch(pCmd->Result) {
492 	case RETURN_SUCCESS:
493 		ccb->ccb_h.status = CAM_REQ_CMP;
494 		break;
495 	case RETURN_BAD_DEVICE:
496 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
497 		break;
498 	case RETURN_DEVICE_BUSY:
499 		ccb->ccb_h.status = CAM_BUSY;
500 		break;
501 	case RETURN_INVALID_REQUEST:
502 		ccb->ccb_h.status = CAM_REQ_INVALID;
503 		break;
504 	case RETURN_SELECTION_TIMEOUT:
505 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
506 		break;
507 	case RETURN_RETRY:
508 		ccb->ccb_h.status = CAM_BUSY;
509 		break;
510 	default:
511 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
512 		break;
513 	}
514 
515 	if (pCmd->flags.data_in) {
516 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
517 	}
518 	else if (pCmd->flags.data_out) {
519 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
520 	}
521 
522 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
523 
524 	cmdext_put(ext);
525 	ldm_free_cmds(pCmd);
526 	xpt_done(ccb);
527 }
528 
529 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
530 {
531 	/* we always supply a physical SG list ourselves, so the core should never ask us to build one */
532 	HPT_ASSERT(0);
533 	return FALSE;
534 }
535 
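/*
 * busdma callback: copy the segment list produced by bus_dmamap_load_ccb()
 * into the command's physical SG table, mark the final entry end-of-table,
 * pre-sync the map for the transfer direction, arm the per-command timeout
 * and hand the command to the LDM layer.
 */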
536 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
537 {
538 	PCOMMAND pCmd = (PCOMMAND)arg;
539 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
540 	PSG psg = pCmd->psg;
541 	int idx;
542 
543 	HPT_ASSERT(pCmd->flags.physical_sg);
544 
545 	if (error)
546 		panic("busdma error");
547 
548 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
549 
550 	if (nsegs != 0) {
551 		for (idx = 0; idx < nsegs; idx++, psg++) {
552 			psg->addr.bus = segs[idx].ds_addr;
553 			psg->size = segs[idx].ds_len;
554 			psg->eot = 0;
555 		}
556 		psg[-1].eot = 1;
557 
558 		if (pCmd->flags.data_in) {
559 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
560 			    BUS_DMASYNC_PREREAD);
561 		}
562 		else if (pCmd->flags.data_out) {
563 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
564 			    BUS_DMASYNC_PREWRITE);
565 		}
566 	}
567 
568 	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
569 	ldm_queue_cmd(pCmd);
570 }
571 
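/*
 * XPT_SCSI_IO handler.  Simple commands (INQUIRY including VPD pages 0x00,
 * 0x80 and 0x83, READ CAPACITY, READ CAPACITY(16) via SERVICE ACTION IN,
 * REPORT LUNS, TEST UNIT READY, ...) are emulated or completed directly
 * here; ATA pass-through and the read/write/verify opcodes are translated
 * into LDM commands whose data buffers are mapped with bus_dmamap_load_ccb().
 */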
572 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
573 {
574 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
575 	PVDEV vd;
576 	PCOMMAND pCmd;
577 	POS_CMDEXT ext;
578 	HPT_U8 *cdb;
579 
580 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
581 		cdb = ccb->csio.cdb_io.cdb_ptr;
582 	else
583 		cdb = ccb->csio.cdb_io.cdb_bytes;
584 
585 	KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
586 		ccb,
587 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
588 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
589 	));
590 
591 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
592 	if (ccb->ccb_h.target_lun != 0 ||
593 		ccb->ccb_h.target_id >= osm_max_targets ||
594 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
595 	{
596 		ccb->ccb_h.status = CAM_TID_INVALID;
597 		xpt_done(ccb);
598 		return;
599 	}
600 
601 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
602 
603 	if (!vd) {
604 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
605 		xpt_done(ccb);
606 		return;
607 	}
608 
609 	switch (cdb[0]) {
610 	case TEST_UNIT_READY:
611 	case START_STOP_UNIT:
612 	case SYNCHRONIZE_CACHE:
613 		ccb->ccb_h.status = CAM_REQ_CMP;
614 		break;
615 
616 	case 0x85: /*ATA_16*/
617 	case 0xA1: /*ATA_12*/
618 	{
619 		int error;
620 		HPT_U8 prot;
621 		PassthroughCmd *passthru;
622 
623 		if (mIsArray(vd->type)) {
624 			ccb->ccb_h.status = CAM_REQ_INVALID;
625 			break;
626 		}
627 
628 		HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk);
629 
630 		prot = (cdb[1] & 0x1e) >> 1;
631 
632 
633 		if (prot < 3 || prot > 5)
634 		{
635 			ccb->ccb_h.status = CAM_REQ_INVALID;
636 			break;
637 		}
638 
639 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
640 		if (!pCmd) {
641 			HPT_ASSERT(0);
642 			ccb->ccb_h.status = CAM_BUSY;
643 			break;
644 		}
645 
646 		passthru = &pCmd->uCmd.Passthrough;
647 		if (cdb[0] == 0x85/*ATA_16*/) {
648 			if (cdb[1] & 0x1) {
649 				passthru->bFeaturesReg =
650 					((HPT_U16)cdb[3] << 8)
651 						| cdb[4];
652 				passthru->bSectorCountReg =
653 					((HPT_U16)cdb[5] << 8) |
654 						cdb[6];
655 				passthru->bLbaLowReg =
656 					((HPT_U16)cdb[7] << 8) |
657 						cdb[8];
658 				passthru->bLbaMidReg =
659 					((HPT_U16)cdb[9] << 8) |
660 						cdb[10];
661 				passthru->bLbaHighReg =
662 					((HPT_U16)cdb[11] << 8) |
663 						cdb[12];
664 			} else {
665 				passthru->bFeaturesReg = cdb[4];
666 				passthru->bSectorCountReg = cdb[6];
667 				passthru->bLbaLowReg = cdb[8];
668 				passthru->bLbaMidReg = cdb[10];
669 				passthru->bLbaHighReg = cdb[12];
670 			}
671 			passthru->bDriveHeadReg = cdb[13];
672 			passthru->bCommandReg = cdb[14];
673 
674 		} else { /*ATA_12*/
675 
676 			passthru->bFeaturesReg = cdb[3];
677 			passthru->bSectorCountReg = cdb[4];
678 			passthru->bLbaLowReg = cdb[5];
679 			passthru->bLbaMidReg = cdb[6];
680 			passthru->bLbaHighReg = cdb[7];
681 			passthru->bDriveHeadReg = cdb[8];
682 			passthru->bCommandReg = cdb[9];
683 		}
684 
685 		if (cdb[1] & 0xe0) {
686 
687 
688 			if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI ||
689 				passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT ||
690 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI ||
691 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT ||
692 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT)
693 				) {
694 				goto error;
695 			}
696 		}
697 
698 
699 		if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER &&
700 			passthru->bCommandReg == ATA_CMD_SET_FEATURES) {
701 			goto error;
702 		}
703 
704 
705 		passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE;
706 		switch (prot) {
707 			default: /* non-data */
708 				break;
709 			case 4: /*PIO data in, T_DIR=1 match check*/
710 				if ((cdb[2] & 3) &&
711 					(cdb[2] & 0x8) == 0)
712 				{
713 					OsPrint(("PIO data in, T_DIR=1 match check"));
714 					goto error;
715 				}
716 				pCmd->flags.data_in = 1;
717 				break;
718 			case 5: /*PIO data out, T_DIR=0 match check*/
719 				if ((cdb[2] & 3) &&
720 					(cdb[2] & 0x8))
721 				{
722 					OsPrint(("PIO data out, T_DIR=0 match check"));
723 					goto error;
724 				}
725 
726 				pCmd->flags.data_out = 1;
727 				break;
728 		}
729 		pCmd->type = CMD_TYPE_PASSTHROUGH;
730 		pCmd->priv = ext = cmdext_get(vbus_ext);
731 		HPT_ASSERT(ext);
732 		ext->ccb = ccb;
733 		pCmd->target = vd;
734 		pCmd->done = os_cmddone;
735 		pCmd->buildsgl = os_buildsgl;
736 		pCmd->psg = ext->psg;
737 
738 		if(!ccb->csio.dxfer_len)
739 		{
740 			ldm_queue_cmd(pCmd);
741 			return;
742 		}
743 		pCmd->flags.physical_sg = 1;
744 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
745 					ext->dma_map, ccb,
746 					hpt_io_dmamap_callback, pCmd,
747 				    	BUS_DMA_WAITOK
748 					);
749 		KdPrint(("bus_dmamap_load return %d", error));
750 		if (error && error!=EINPROGRESS) {
751 			os_printk("bus_dmamap_load error %d", error);
752 			cmdext_put(ext);
753 			ldm_free_cmds(pCmd);
754 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
755 			xpt_done(ccb);
756 		}
757 		return;
758 error:
759 		ldm_free_cmds(pCmd);
760 		ccb->ccb_h.status = CAM_REQ_INVALID;
761 		break;
762 	}
763 
764 	case INQUIRY:
765 	{
766 		PINQUIRYDATA inquiryData;
767 		HIM_DEVICE_CONFIG devconf;
768 		HPT_U8 *rbuf;
769 
770 		memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
771 		inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
772 
773 		if (cdb[1] & 1) {
774 			rbuf = (HPT_U8 *)inquiryData;
775 			switch(cdb[2]) {
776 			case 0:
777 				rbuf[0] = 0;
778 				rbuf[1] = 0;
779 				rbuf[2] = 0;
780 				rbuf[3] = 3;
781 				rbuf[4] = 0;
782 				rbuf[5] = 0x80;
783 				rbuf[6] = 0x83;
784 				ccb->ccb_h.status = CAM_REQ_CMP;
785 				break;
786 			case 0x80: {
787 				rbuf[0] = 0;
788 				rbuf[1] = 0x80;
789 				rbuf[2] = 0;
790 				if (vd->type == VD_RAW) {
791 					rbuf[3] = 20;
792 					vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf);
793 					memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20);
794 					ldm_ide_fixstring(&rbuf[4], 20);
795 				} else {
796 					rbuf[3] = 1;
797 					rbuf[4] = 0x20;
798 				}
799 				ccb->ccb_h.status = CAM_REQ_CMP;
800 				break;
801 			}
802 			case 0x83:
803 				rbuf[0] = 0;
804 				rbuf[1] = 0x83;
805 				rbuf[2] = 0;
806 				rbuf[3] = 12;
807 				rbuf[4] = 1;
808 				rbuf[5] = 2;
809 				rbuf[6] = 0;
810 				rbuf[7] = 8;
811 				rbuf[8] = 0;
812 				rbuf[9] = 0x19;
813 				rbuf[10] = 0x3C;
814 				rbuf[11] = 0;
815 				rbuf[12] = 0;
816 				rbuf[13] = 0;
817 				rbuf[14] = 0;
818 				rbuf[15] = 0;
819 				ccb->ccb_h.status = CAM_REQ_CMP;
820 				break;
821 			default:
822 				ccb->ccb_h.status = CAM_REQ_INVALID;
823 				break;
824 			}
825 
826 			break;
827 		}
828 		else if (cdb[2]) {
829 			ccb->ccb_h.status = CAM_REQ_INVALID;
830 			break;
831 		}
832 
833 		inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/
834 		inquiryData->Versions = 5; /*SPC-3*/
835 		inquiryData->ResponseDataFormat = 2;
836 		inquiryData->AdditionalLength = 0x5b;
837 		inquiryData->CommandQueue = 1;
838 
839 		if (ccb->csio.dxfer_len > 63) {
840 			rbuf = (HPT_U8 *)inquiryData;
841 			rbuf[58] = 0x60;
842 			rbuf[59] = 0x3;
843 
844 			rbuf[64] = 0x3;
845 			rbuf[66] = 0x3;
846 			rbuf[67] = 0x20;
847 
848 		}
849 
850 		if (vd->type == VD_RAW) {
851 			vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf);
852 
853 			if ((devconf.pIdentifyData->GeneralConfiguration & 0x80))
854 				inquiryData->RemovableMedia = 1;
855 
856 
857 			memcpy(&inquiryData->VendorId, "ATA     ", 8);
858 			memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16);
859 			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16);
860 			memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4);
861 			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4);
862 			if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ')
863 				memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4);
864 		} else {
865 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
866 			snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d        ",
867 				os_get_vbus_seq(vbus_ext), vd->target_id);
868 			inquiryData->ProductId[15] = ' ';
869 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
870 		}
871 
872 		ccb->ccb_h.status = CAM_REQ_CMP;
873 		break;
874 	}
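	/*
	 * sector_size_shift bridges the 512-byte units used by the core layer
	 * (vd->capacity, uCmd.Ide.Lba) and the logical sector size reported to
	 * CAM: for 4KB-sector devices the shift is 3, the capacity cases below
	 * report a 4096-byte block length with a correspondingly smaller block
	 * count, and the read/write path shifts CAM LBAs and sector counts back
	 * up into 512-byte units.
	 */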
875 	case READ_CAPACITY:
876 	{
877 		HPT_U8 *rbuf = ccb->csio.data_ptr;
878 		HPT_U32 cap;
879 		HPT_U8 sector_size_shift = 0;
880 		HPT_U64 new_cap;
881 		HPT_U32 sector_size = 0;
882 
883 		if (mIsArray(vd->type))
884 			sector_size_shift = vd->u.array.sector_size_shift;
885 		else{
886 			if(vd->type == VD_RAW){
887 				sector_size = vd->u.raw.logical_sector_size;
888 			}
889 
890 			switch (sector_size) {
891 				case 0x1000:
892 					KdPrint(("set 4k sector size in READ_CAPACITY"));
893 					sector_size_shift = 3;
894 					break;
895 				default:
896 					break;
897 			}
898 		}
899 		new_cap = vd->capacity >> sector_size_shift;
900 
901 		if (new_cap > 0xfffffffful)
902 			cap = 0xffffffff;
903 		else
904 			cap = new_cap - 1;
905 
906 		rbuf[0] = (HPT_U8)(cap>>24);
907 		rbuf[1] = (HPT_U8)(cap>>16);
908 		rbuf[2] = (HPT_U8)(cap>>8);
909 		rbuf[3] = (HPT_U8)cap;
910 		rbuf[4] = 0;
911 		rbuf[5] = 0;
912 		rbuf[6] = 2 << sector_size_shift;
913 		rbuf[7] = 0;
914 
915 		ccb->ccb_h.status = CAM_REQ_CMP;
916 		break;
917 	}
918 
919 	case REPORT_LUNS:
920 	{
921 		HPT_U8 *rbuf = ccb->csio.data_ptr;
922 		memset(rbuf, 0, 16);
923 		rbuf[3] = 8;
924 		ccb->ccb_h.status = CAM_REQ_CMP;
925 		break;
926 	}
927 	case SERVICE_ACTION_IN:
928 	{
929 		HPT_U8 *rbuf = ccb->csio.data_ptr;
930 		HPT_U64	cap = 0;
931 		HPT_U8 sector_size_shift = 0;
932 		HPT_U32 sector_size = 0;
933 
934 		if(mIsArray(vd->type))
935 			sector_size_shift = vd->u.array.sector_size_shift;
936 		else{
937 			if(vd->type == VD_RAW){
938 				sector_size = vd->u.raw.logical_sector_size;
939 			}
940 
941 			switch (sector_size) {
942 				case 0x1000:
943 					KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
944 					sector_size_shift = 3;
945 					break;
946 				default:
947 					break;
948 			}
949 		}
950 		cap = (vd->capacity >> sector_size_shift) - 1;
951 
952 		rbuf[0] = (HPT_U8)(cap>>56);
953 		rbuf[1] = (HPT_U8)(cap>>48);
954 		rbuf[2] = (HPT_U8)(cap>>40);
955 		rbuf[3] = (HPT_U8)(cap>>32);
956 		rbuf[4] = (HPT_U8)(cap>>24);
957 		rbuf[5] = (HPT_U8)(cap>>16);
958 		rbuf[6] = (HPT_U8)(cap>>8);
959 		rbuf[7] = (HPT_U8)cap;
960 		rbuf[8] = 0;
961 		rbuf[9] = 0;
962 		rbuf[10] = 2 << sector_size_shift;
963 		rbuf[11] = 0;
964 
965 		if(!mIsArray(vd->type)){
966 			rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector;
967 			rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f);
968 			rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned);
969 		}
970 
971 		ccb->ccb_h.status = CAM_REQ_CMP;
972 		break;
973 	}
974 
975 	case READ_6:
976 	case READ_10:
977 	case READ_16:
978 	case WRITE_6:
979 	case WRITE_10:
980 	case WRITE_16:
981 	case 0x13: /* VERIFY_6 */
982 	case 0x2f: /* VERIFY_10 */
983 	case 0x8f: /* VERIFY_16 */
984 	{
985 		int error;
986 		HPT_U8 sector_size_shift = 0;
987 		HPT_U32 sector_size = 0;
988 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
989 		if(!pCmd){
990 			KdPrint(("Failed to allocate command!"));
991 			ccb->ccb_h.status = CAM_BUSY;
992 			break;
993 		}
994 
995 		switch (cdb[0])	{
996 		case READ_6:
997 		case WRITE_6:
998 		case 0x13:
999 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
1000 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
1001 			break;
1002 		case READ_16:
1003 		case WRITE_16:
1004 		case 0x8f: /* VERIFY_16 */
1005 		{
1006 			HPT_U64 block =
1007 				((HPT_U64)cdb[2]<<56) |
1008 				((HPT_U64)cdb[3]<<48) |
1009 				((HPT_U64)cdb[4]<<40) |
1010 				((HPT_U64)cdb[5]<<32) |
1011 				((HPT_U64)cdb[6]<<24) |
1012 				((HPT_U64)cdb[7]<<16) |
1013 				((HPT_U64)cdb[8]<<8) |
1014 				((HPT_U64)cdb[9]);
1015 			pCmd->uCmd.Ide.Lba = block;
1016 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
1017 			break;
1018 		}
1019 
1020 		default:
1021 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
1022 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
1023 			break;
1024 		}
1025 
1026 		if(mIsArray(vd->type)) {
1027 			sector_size_shift = vd->u.array.sector_size_shift;
1028 		}
1029 		else{
1030 			if(vd->type == VD_RAW){
1031 				sector_size = vd->u.raw.logical_sector_size;
1032 			}
1033 
1034 			switch (sector_size) {
1035 				case 0x1000:
1036 					KdPrint(("<8>resize sector size from 4k to 512"));
1037 					sector_size_shift = 3;
1038 					break;
1039 				default:
1040 					break;
1041 			}
1042 		}
1043 		pCmd->uCmd.Ide.Lba <<= sector_size_shift;
1044 		pCmd->uCmd.Ide.nSectors <<= sector_size_shift;
1045 
1046 
1047 		switch (cdb[0]) {
1048 		case READ_6:
1049 		case READ_10:
1050 		case READ_16:
1051 			pCmd->flags.data_in = 1;
1052 			break;
1053 		case WRITE_6:
1054 		case WRITE_10:
1055 		case WRITE_16:
1056 			pCmd->flags.data_out = 1;
1057 			break;
1058 		}
1059 		pCmd->priv = ext = cmdext_get(vbus_ext);
1060 		HPT_ASSERT(ext);
1061 		ext->ccb = ccb;
1062 		pCmd->target = vd;
1063 		pCmd->done = os_cmddone;
1064 		pCmd->buildsgl = os_buildsgl;
1065 		pCmd->psg = ext->psg;
1066 		pCmd->flags.physical_sg = 1;
1067 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
1068 					ext->dma_map, ccb,
1069 					hpt_io_dmamap_callback, pCmd,
1070 				    	BUS_DMA_WAITOK
1071 					);
1072 		KdPrint(("bus_dmamap_load return %d", error));
1073 		if (error && error!=EINPROGRESS) {
1074 			os_printk("bus_dmamap_load error %d", error);
1075 			cmdext_put(ext);
1076 			ldm_free_cmds(pCmd);
1077 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1078 			xpt_done(ccb);
1079 		}
1080 		return;
1081 	}
1082 
1083 	default:
1084 		ccb->ccb_h.status = CAM_REQ_INVALID;
1085 		break;
1086 	}
1087 
1088 	xpt_done(ccb);
1089 	return;
1090 }
1091 
1092 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
1093 {
1094 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
1095 
1096 	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
1097 
1098 	hpt_assert_vbus_locked(vbus_ext);
1099 	switch (ccb->ccb_h.func_code) {
1100 
1101 	case XPT_SCSI_IO:
1102 		hpt_scsi_io(vbus_ext, ccb);
1103 		return;
1104 
1105 	case XPT_RESET_BUS:
1106 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
1107 		break;
1108 
1109 	case XPT_GET_TRAN_SETTINGS:
1110 	case XPT_SET_TRAN_SETTINGS:
1111 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1112 		break;
1113 
1114 	case XPT_CALC_GEOMETRY:
1115 		ccb->ccg.heads = 255;
1116 		ccb->ccg.secs_per_track = 63;
1117 		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
1118 		ccb->ccb_h.status = CAM_REQ_CMP;
1119 		break;
1120 
1121 	case XPT_PATH_INQ:
1122 	{
1123 		struct ccb_pathinq *cpi = &ccb->cpi;
1124 
1125 		cpi->version_num = 1;
1126 		cpi->hba_inquiry = PI_SDTR_ABLE;
1127 		cpi->target_sprt = 0;
1128 		cpi->hba_misc = PIM_NOBUSRESET;
1129 		cpi->hba_eng_cnt = 0;
1130 		cpi->max_target = osm_max_targets;
1131 		cpi->max_lun = 0;
1132 		cpi->unit_number = cam_sim_unit(sim);
1133 		cpi->bus_id = cam_sim_bus(sim);
1134 		cpi->initiator_id = osm_max_targets;
1135 		cpi->base_transfer_speed = 3300;
1136 
1137 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1138 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
1139 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1140 		cpi->transport = XPORT_SPI;
1141 		cpi->transport_version = 2;
1142 		cpi->protocol = PROTO_SCSI;
1143 		cpi->protocol_version = SCSI_REV_2;
1144 		cpi->ccb_h.status = CAM_REQ_CMP;
1145 		break;
1146 	}
1147 
1148 	default:
1149 		ccb->ccb_h.status = CAM_REQ_INVALID;
1150 		break;
1151 	}
1152 
1153 	xpt_done(ccb);
1154 	return;
1155 }
1156 
1157 static void hpt_pci_intr(void *arg)
1158 {
1159 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
1160 	hpt_lock_vbus(vbus_ext);
1161 	ldm_intr((PVBUS)vbus_ext->vbus);
1162 	hpt_unlock_vbus(vbus_ext);
1163 }
1164 
1165 static void hpt_poll(struct cam_sim *sim)
1166 {
1167 	PVBUS_EXT vbus_ext = cam_sim_softc(sim);
1168 	hpt_assert_vbus_locked(vbus_ext);
1169 	ldm_intr((PVBUS)vbus_ext->vbus);
1170 }
1171 
1172 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
1173 {
1174 	KdPrint(("hpt_async"));
1175 }
1176 
1177 static int hpt_shutdown(device_t dev)
1178 {
1179 	KdPrint(("hpt_shutdown(dev=%p)", dev));
1180 	return 0;
1181 }
1182 
1183 static int hpt_detach(device_t dev)
1184 {
1185 	/* we don't allow the driver to be unloaded. */
1186 	return EBUSY;
1187 }
1188 
1189 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
1190 {
1191 	arg->ioctl_cmnd = 0;
1192 	wakeup(arg);
1193 }
1194 
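/*
 * Pass an ioctl to the core layer and wait for completion: ioctl_cmnd is set
 * non-NULL as a "busy" marker and cleared by hpt_ioctl_done(), which also
 * wakes the sleeper.  If hpt_sleep() times out, the vbus is reset and any
 * deferred tasks are run before waiting again.
 */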
1195 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
1196 {
1197 	ioctl_args->result = -1;
1198 	ioctl_args->done = hpt_ioctl_done;
1199 	ioctl_args->ioctl_cmnd = (void *)1;
1200 
1201 	hpt_lock_vbus(vbus_ext);
1202 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
1203 
1204 	while (ioctl_args->ioctl_cmnd) {
1205 		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1206 			break;
1207 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
1208 		__hpt_do_tasks(vbus_ext);
1209 	}
1210 
1211 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
1212 
1213 	hpt_unlock_vbus(vbus_ext);
1214 }
1215 
1216 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
1217 {
1218 	PVBUS vbus;
1219 	PVBUS_EXT vbus_ext;
1220 
1221 	ldm_for_each_vbus(vbus, vbus_ext) {
1222 		__hpt_do_ioctl(vbus_ext, ioctl_args);
1223 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
1224 			return;
1225 	}
1226 }
1227 
1228 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
1229 	IOCTL_ARG arg;\
1230 	arg.dwIoControlCode = code;\
1231 	arg.lpInBuffer = inbuf;\
1232 	arg.lpOutBuffer = outbuf;\
1233 	arg.nInBufferSize = insize;\
1234 	arg.nOutBufferSize = outsize;\
1235 	arg.lpBytesReturned = 0;\
1236 	hpt_do_ioctl(&arg);\
1237 	arg.result;\
1238 })
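/*
 * HPT_DO_IOCTL() is a GCC statement-expression wrapper: it fills an
 * IOCTL_ARG on the stack, dispatches it through hpt_do_ioctl() and evaluates
 * to arg.result (nonzero meaning failure, as the callers below check), e.g.:
 *
 *	if (HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
 *			&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3)))
 *		return -1;
 */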
1239 
1240 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
1241 
1242 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
1243 {
1244 	int i;
1245 	HPT_U32 count = nMaxCount-1;
1246 
1247 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
1248 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
1249 		return -1;
1250 
1251 	nMaxCount = (int)pIds[0];
1252 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
1253 	return nMaxCount;
1254 }
1255 
1256 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
1257 {
1258 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
1259 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
1260 }
1261 
1262 /* this does not logically belong in this file, but we want to use the ioctl interface */
1263 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
1264 {
1265 	LOGICAL_DEVICE_INFO_V3 devinfo;
1266 	int i, result;
1267 	DEVICEID param[2] = { id, 0 };
1268 
1269 	if (hpt_get_device_info_v3(id, &devinfo))
1270 		return -1;
1271 
1272 	if (devinfo.Type!=LDT_ARRAY)
1273 		return -1;
1274 
1275 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
1276 		param[1] = AS_REBUILD_ABORT;
1277 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
1278 		param[1] = AS_VERIFY_ABORT;
1279 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
1280 		param[1] = AS_INITIALIZE_ABORT;
1281 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
1282 		param[1] = AS_TRANSFORM_ABORT;
1283 	else
1284 		return -1;
1285 
1286 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
1287 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
1288 				param, sizeof(param), 0, 0);
1289 
1290 	for (i=0; i<devinfo.u.array.nDisk; i++)
1291 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
1292 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
1293 
1294 	return result;
1295 }
1296 
1297 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
1298 {
1299 	DEVICEID ids[32];
1300 	int i, count;
1301 
1302 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
1303 
1304 	for (i=0; i<count; i++)
1305 		__hpt_stop_tasks(vbus_ext, ids[i]);
1306 }
1307 
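/*
 * Character-device interface used by the HighPoint management tools: the
 * /dev node named after driver_name accepts HPT_DO_IOCONTROL requests
 * (forwarded to the core ioctl handler) and HPT_SCAN_BUS (a CAM rescan of
 * every vbus).
 */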
1308 static	d_open_t	hpt_open;
1309 static	d_close_t	hpt_close;
1310 static	d_ioctl_t	hpt_ioctl;
1311 static  int 		hpt_rescan_bus(void);
1312 
1313 static struct cdevsw hpt_cdevsw = {
1314 	.d_open =	hpt_open,
1315 	.d_close =	hpt_close,
1316 	.d_ioctl =	hpt_ioctl,
1317 	.d_name =	driver_name,
1318 	.d_version =	D_VERSION,
1319 };
1320 
1321 static struct intr_config_hook hpt_ich;
1322 
1323 /*
1324  * hpt_final_init will be called after all hpt_attach calls have completed.
1325  */
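/*
 * It is deferred through an intr_config_hook so it runs once interrupts are
 * usable: memory pools are allocated, the hardware is initialized, and for
 * each vbus a busdma tag, the command-extension pool, a CAM SIM/bus/path,
 * the PCI interrupt handlers, a shutdown event handler and the worker task
 * are set up.  Finally the management character device is created.
 */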
1326 static void hpt_final_init(void *dummy)
1327 {
1328 	int       i,unit_number=0;
1329 	PVBUS_EXT vbus_ext;
1330 	PVBUS vbus;
1331 	PHBA hba;
1332 
1333 	/* Clear the config hook */
1334 	config_intrhook_disestablish(&hpt_ich);
1335 
1336 	/* allocate memory */
1337 	i = 0;
1338 	ldm_for_each_vbus(vbus, vbus_ext) {
1339 		if (hpt_alloc_mem(vbus_ext)) {
1340 			os_printk("out of memory");
1341 			return;
1342 		}
1343 		i++;
1344 	}
1345 
1346 	if (!i) {
1347 		if (bootverbose)
1348 			os_printk("no controller detected.");
1349 		return;
1350 	}
1351 
1352 	/* initializing hardware */
1353 	ldm_for_each_vbus(vbus, vbus_ext) {
1354 		/* make timer available here */
1355 		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
1356 		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
1357 		if (hpt_init_vbus(vbus_ext)) {
1358 			os_printk("failed to initialize hardware");
1359 			break; /* FIXME */
1360 		}
1361 	}
1362 
1363 	/* register CAM interface */
1364 	ldm_for_each_vbus(vbus, vbus_ext) {
1365 		struct cam_devq *devq;
1366 		struct ccb_setasync	ccb;
1367 
1368 		if (bus_dma_tag_create(NULL,/* parent */
1369 				4,	/* alignment */
1370 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1371 				BUS_SPACE_MAXADDR,	/* lowaddr */
1372 				BUS_SPACE_MAXADDR, 	/* highaddr */
1373 				NULL, NULL, 		/* filter, filterarg */
1374 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1375 				os_max_sg_descriptors,	/* nsegments */
1376 				0x10000,	/* maxsegsize */
1377 				BUS_DMA_WAITOK,		/* flags */
1378 				busdma_lock_mutex,	/* lockfunc */
1379 				&vbus_ext->lock,		/* lockfuncarg */
1380 				&vbus_ext->io_dmat	/* tag */))
1381 		{
1382 			return ;
1383 		}
1384 
1385 		for (i=0; i<os_max_queue_comm; i++) {
1386 			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1387 			if (!ext) {
1388 				os_printk("Can't alloc cmdext(%d)", i);
1389 				return ;
1390 			}
1391 			ext->vbus_ext = vbus_ext;
1392 			ext->next = vbus_ext->cmdext_list;
1393 			vbus_ext->cmdext_list = ext;
1394 
1395 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1396 				os_printk("Can't create dma map(%d)", i);
1397 				return ;
1398 			}
1399 			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
1400 		}
1401 
1402 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1403 			os_printk("cam_simq_alloc failed");
1404 			return ;
1405 		}
1406 
1407 		hpt_lock_vbus(vbus_ext);
1408 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1409 				vbus_ext, unit_number, &vbus_ext->lock,
1410 				os_max_queue_comm, /*tagged*/8,  devq);
1411 		unit_number++;
1412 		if (!vbus_ext->sim) {
1413 			os_printk("cam_sim_alloc failed");
1414 			cam_simq_free(devq);
1415 			hpt_unlock_vbus(vbus_ext);
1416 			return ;
1417 		}
1418 
1419 		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
1420 			os_printk("xpt_bus_register failed");
1421 			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
1422 			vbus_ext->sim = NULL;
1423 			return ;
1424 		}
1425 
1426 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1427 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1428 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1429 		{
1430 			os_printk("xpt_create_path failed");
1431 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1432 			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
1433 			hpt_unlock_vbus(vbus_ext);
1434 			vbus_ext->sim = NULL;
1435 			return ;
1436 		}
1437 		hpt_unlock_vbus(vbus_ext);
1438 
1439 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1440 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1441 		ccb.event_enable = AC_LOST_DEVICE;
1442 		ccb.callback = hpt_async;
1443 		ccb.callback_arg = vbus_ext;
1444 		xpt_action((union ccb *)&ccb);
1445 
1446 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1447 			int rid = 0;
1448 			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
1449 				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1450 			{
1451 				os_printk("can't allocate interrupt");
1452 				return ;
1453 			}
1454 
1455 			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
1456 				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
1457 			{
1458 				os_printk("can't set up interrupt");
1459 				return ;
1460 			}
1461 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1462 
1463 		}
1464 
1465 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1466 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1467 		if (!vbus_ext->shutdown_eh)
1468 			os_printk("Shutdown event registration failed");
1469 	}
1470 
1471 	ldm_for_each_vbus(vbus, vbus_ext) {
1472 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1473 		if (vbus_ext->tasks)
1474 			TASK_ENQUEUE(&vbus_ext->worker);
1475 	}
1476 
1477 	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1478 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1479 }
1480 
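/*
 * When built as a module, override_kernel_driver() swaps this driver with
 * the first entry of the pci devclass's driver list so that it is probed
 * ahead of a statically compiled driver claiming the same devices.  The
 * struct devclass/driverlink definitions below mirror the kernel's private
 * layout for that purpose.
 */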
1481 #if defined(KLD_MODULE)
1482 
1483 typedef struct driverlink *driverlink_t;
1484 struct driverlink {
1485 	kobj_class_t	driver;
1486 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1487 };
1488 
1489 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1490 
1491 struct devclass {
1492 	TAILQ_ENTRY(devclass) link;
1493 	devclass_t	parent;		/* parent in devclass hierarchy */
1494 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1495 	char		*name;
1496 	device_t	*devices;	/* array of devices indexed by unit */
1497 	int		maxunit;	/* size of devices array */
1498 };
1499 
1500 static void override_kernel_driver(void)
1501 {
1502 	driverlink_t dl, dlfirst;
1503 	driver_t *tmpdriver;
1504 	devclass_t dc = devclass_find("pci");
1505 
1506 	if (dc){
1507 		dlfirst = TAILQ_FIRST(&dc->drivers);
1508 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1509 			if(strcmp(dl->driver->name, driver_name) == 0) {
1510 				tmpdriver=dl->driver;
1511 				dl->driver=dlfirst->driver;
1512 				dlfirst->driver=tmpdriver;
1513 				break;
1514 			}
1515 		}
1516 	}
1517 }
1518 
1519 #else
1520 #define override_kernel_driver()
1521 #endif
1522 
1523 static void hpt_init(void *dummy)
1524 {
1525 	if (bootverbose)
1526 		os_printk("%s %s", driver_name_long, driver_ver);
1527 
1528 	override_kernel_driver();
1529 	init_config();
1530 
1531 	hpt_ich.ich_func = hpt_final_init;
1532 	hpt_ich.ich_arg = NULL;
1533 	if (config_intrhook_establish(&hpt_ich) != 0) {
1534 		printf("%s: cannot establish configuration hook\n",
1535 		    driver_name_long);
1536 	}
1537 
1538 }
1539 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1540 
1541 /*
1542  * CAM driver interface
1543  */
1544 static device_method_t driver_methods[] = {
1545 	/* Device interface */
1546 	DEVMETHOD(device_probe,		hpt_probe),
1547 	DEVMETHOD(device_attach,	hpt_attach),
1548 	DEVMETHOD(device_detach,	hpt_detach),
1549 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1550 	{ 0, 0 }
1551 };
1552 
1553 static driver_t hpt_pci_driver = {
1554 	driver_name,
1555 	driver_methods,
1556 	sizeof(HBA)
1557 };
1558 
1559 static devclass_t	hpt_devclass;
1560 
1561 #ifndef TARGETNAME
1562 #error "no TARGETNAME found"
1563 #endif
1564 
1565 /* use these wrappers so that TARGETNAME is macro-expanded */
1566 #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6)
1567 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1568 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1569 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0);
1570 __MODULE_VERSION(TARGETNAME, 1);
1571 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1572 
1573 static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
1574 {
1575 	return 0;
1576 }
1577 
1578 static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
1579 {
1580 	return 0;
1581 }
1582 
1583 static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
1584 {
1585 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1586 	IOCTL_ARG ioctl_args;
1587 	HPT_U32 bytesReturned;
1588 
1589 	switch (cmd){
1590 	case HPT_DO_IOCONTROL:
1591 	{
1592 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1593 			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
1594 				piop->dwIoControlCode,
1595 				piop->lpInBuffer,
1596 				piop->nInBufferSize,
1597 				piop->lpOutBuffer,
1598 				piop->nOutBufferSize));
1599 
1600 			memset(&ioctl_args, 0, sizeof(ioctl_args));
1601 
1602 			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1603 			ioctl_args.nInBufferSize = piop->nInBufferSize;
1604 			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1605 			ioctl_args.lpBytesReturned = &bytesReturned;
1606 
1607 			if (ioctl_args.nInBufferSize) {
1608 				ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1609 				if (!ioctl_args.lpInBuffer)
1610 					goto invalid;
1611 				if (copyin((void*)piop->lpInBuffer,
1612 						ioctl_args.lpInBuffer, piop->nInBufferSize))
1613 					goto invalid;
1614 			}
1615 
1616 			if (ioctl_args.nOutBufferSize) {
1617 				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK);
1618 				if (!ioctl_args.lpOutBuffer)
1619 					goto invalid;
1620 			}
1621 
1622 			hpt_do_ioctl(&ioctl_args);
1623 
1624 			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1625 				if (piop->nOutBufferSize) {
1626 					if (copyout(ioctl_args.lpOutBuffer,
1627 						(void*)piop->lpOutBuffer, piop->nOutBufferSize))
1628 						goto invalid;
1629 				}
1630 				if (piop->lpBytesReturned) {
1631 					if (copyout(&bytesReturned,
1632 						(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1633 						goto invalid;
1634 				}
1635 				if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1636 				if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1637 				return 0;
1638 			}
1639 invalid:
1640 			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1641 			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1642 			return EFAULT;
1643 		}
1644 		return EFAULT;
1645 	}
1646 
1647 	case HPT_SCAN_BUS:
1648 	{
1649 		return hpt_rescan_bus();
1650 	}
1651 	default:
1652 		KdPrint(("invalid command!"));
1653 		return EFAULT;
1654 	}
1655 
1656 }
1657 
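/*
 * HPT_SCAN_BUS: allocate a CCB with a wildcard target/LUN path for each vbus
 * and hand it to xpt_rescan() so CAM re-probes the bus.
 */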
1658 static int	hpt_rescan_bus(void)
1659 {
1660 	union ccb			*ccb;
1661 	PVBUS 				vbus;
1662 	PVBUS_EXT			vbus_ext;
1663 
1664 	ldm_for_each_vbus(vbus, vbus_ext) {
1665 		if ((ccb = xpt_alloc_ccb()) == NULL)
1666 		{
1667 			return(ENOMEM);
1668 		}
1669 		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
1670 			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1671 		{
1672 			xpt_free_ccb(ccb);
1673 			return(EIO);
1674 		}
1675 		xpt_rescan(ccb);
1676 	}
1677 	return(0);
1678 }
1679