xref: /dragonfly/sys/dev/raid/twa/tw_osl_freebsd.c (revision e0b1d537)
1 /*
2  * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3  * Copyright (c) 2004-05 Vinod Kashyap.
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2000 BSDi
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	$FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 254263 2013-08-12 23:30:01Z scottl $
30  */
31 
32 /*
33  * AMCC's 3ware driver for 9000 series storage controllers.
34  *
35  * Author: Vinod Kashyap
36  * Modifications by: Adam Radford
37  * Modifications by: Manjunath Ranganathaiah
38  */
39 
40 
41 /*
42  * FreeBSD-specific functions not related to CAM, and other
43  * miscellaneous functions.
44  */
45 
46 
47 #include <dev/raid/twa/tw_osl_includes.h>
48 #include <dev/raid/twa/tw_cl_fwif.h>
49 #include <dev/raid/twa/tw_cl_ioctl.h>
50 #include <dev/raid/twa/tw_osl_ioctl.h>
51 
52 #ifdef TW_OSL_DEBUG
53 TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
54 TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
55 #endif /* TW_OSL_DEBUG */
56 
57 static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
58 
59 
60 static	d_open_t		twa_open;
61 static	d_close_t		twa_close;
62 static	d_ioctl_t		twa_ioctl;
63 
64 static struct dev_ops twa_ops = {
65 	{ "twa", 0, 0 },
66 	.d_open =	twa_open,
67 	.d_close =	twa_close,
68 	.d_ioctl =	twa_ioctl,
69 };
70 
71 static devclass_t	twa_devclass;
72 
73 static int		twa_msi_enable = 0;
74 TUNABLE_INT("hw.twa.msi.enable", &twa_msi_enable);
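/*
 * The hw.twa.msi.enable tunable is read from the kernel environment at boot,
 * so it can be set from loader.conf(5) (e.g. hw.twa.msi.enable="1") to request
 * MSI instead of a legacy line-based interrupt.
 */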
75 
76 
77 /*
78  * Function name:	twa_open
79  * Description:		Called when the controller is opened.
80  *			Simply marks the controller as open.
81  *
82  * Input:		dev	-- control device corresponding to the ctlr
83  *			flags	-- mode of open
84  *			fmt	-- device type (character/block etc.)
85  *			proc	-- current process
86  * Output:		None
87  * Return value:	0	-- success
88  *			non-zero-- failure
89  */
90 static TW_INT32
91 twa_open(struct dev_open_args *ap)
92 {
93 	cdev_t			dev = ap->a_head.a_dev;
94 	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
95 
96 	tw_osli_dbg_dprintf(5, sc, "entered");
97 	sc->open = TW_CL_TRUE;
98 	return(0);
99 }
100 
101 
102 
103 /*
104  * Function name:	twa_close
105  * Description:		Called when the controller is closed.
106  *			Simply marks the controller as not open.
107  *
108  * Input:		dev	-- control device corresponding to the ctlr
109  *			flags	-- mode of corresponding open
110  *			fmt	-- device type (character/block etc.)
111  *			proc	-- current process
112  * Output:		None
113  * Return value:	0	-- success
114  *			non-zero-- failure
115  */
116 static TW_INT32
117 twa_close(struct dev_close_args *ap)
118 {
119 	cdev_t			dev = ap->a_head.a_dev;
120 	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
121 
122 	tw_osli_dbg_dprintf(5, sc, "entered");
123 	sc->open = TW_CL_FALSE;
124 	return(0);
125 }
126 
127 
128 
129 /*
130  * Function name:	twa_ioctl
131  * Description:		Called when an ioctl is posted to the controller.
132  *			Handles any OS Layer specific cmds, passes the rest
133  *			on to the Common Layer.
134  *
135  * Input:		dev	-- control device corresponding to the ctlr
136  *			cmd	-- ioctl cmd
137  *			buf	-- ptr to buffer in kernel memory, which is
138  *				   a copy of the input buffer in user-space
139  *			flags	-- mode of corresponding open
140  *			proc	-- current process
141  * Output:		buf	-- ptr to buffer in kernel memory, which will
142  *				   be copied to the output buffer in user-space
143  * Return value:	0	-- success
144  *			non-zero-- failure
145  */
146 static TW_INT32
147 twa_ioctl(struct dev_ioctl_args *ap)
148 {
149 	cdev_t			dev = ap->a_head.a_dev;
150 	u_long			cmd = ap->a_cmd;
151 	caddr_t			buf = ap->a_data;
152 	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
153 	TW_INT32		error;
154 
155 	tw_osli_dbg_dprintf(5, sc, "entered");
156 
157 	switch (cmd) {
158 	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
159 		tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
160 		error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
161 		break;
162 
163 	case TW_OSL_IOCTL_SCAN_BUS:
164 		/* Request CAM for a bus scan. */
165 		tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
166 		error = tw_osli_request_bus_scan(sc);
167 		break;
168 
169 	default:
170 		tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
171 		error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
172 		break;
173 	}
174 	return(error);
175 }
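
/*
 * Illustrative sketch only (not part of the driver): user-space tools reach
 * the handlers above through the control device created in twa_attach()
 * ("/dev/twa<unit>").  Assuming the ioctl command macros from tw_osl_ioctl.h
 * are visible to the program, a bus rescan request could look like:
 *
 *	int fd = open("/dev/twa0", O_RDWR);
 *	if (fd >= 0) {
 *		if (ioctl(fd, TW_OSL_IOCTL_SCAN_BUS) == -1)
 *			warn("bus rescan request failed");
 *		close(fd);
 *	}
 */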
176 
177 
178 
179 static TW_INT32	twa_probe(device_t dev);
180 static TW_INT32	twa_attach(device_t dev);
181 static TW_INT32	twa_detach(device_t dev);
182 static TW_INT32	twa_shutdown(device_t dev);
183 static TW_VOID	twa_pci_intr(TW_VOID *arg);
184 static TW_VOID	twa_watchdog(TW_VOID *arg);
185 int twa_setup_intr(struct twa_softc *sc);
186 int twa_teardown_intr(struct twa_softc *sc);
187 
188 static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
189 static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);
190 
191 static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
192 	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
193 static TW_VOID	twa_map_load_callback(TW_VOID *arg,
194 	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
195 
196 
197 static device_method_t	twa_methods[] = {
198 	/* Device interface */
199 	DEVMETHOD(device_probe,		twa_probe),
200 	DEVMETHOD(device_attach,	twa_attach),
201 	DEVMETHOD(device_detach,	twa_detach),
202 	DEVMETHOD(device_shutdown,	twa_shutdown),
203 
204 	DEVMETHOD_END
205 };
206 
207 static driver_t	twa_pci_driver = {
208 	"twa",
209 	twa_methods,
210 	sizeof(struct twa_softc)
211 };
212 
213 DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, NULL, NULL);
214 MODULE_DEPEND(twa, cam, 1, 1, 1);
215 MODULE_DEPEND(twa, pci, 1, 1, 1);
216 MODULE_VERSION(twa, 1);
217 
218 
219 /*
220  * Function name:	twa_probe
221  * Description:		Called at driver load time.  Claims 9000 ctlrs.
222  *
223  * Input:		dev	-- bus device corresponding to the ctlr
224  * Output:		None
225  * Return value:	<= 0	-- success
226  *			> 0	-- failure
227  */
228 static TW_INT32
229 twa_probe(device_t dev)
230 {
231 	static TW_UINT8	first_ctlr = 1;
232 
233 	tw_osli_dbg_printf(3, "entered");
234 
235 	if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
236 		device_set_desc(dev, TW_OSLI_DEVICE_NAME);
237 		/* Print the driver version only once. */
238 		if (first_ctlr) {
239 			kprintf("3ware device driver for 9000 series storage "
240 				"controllers, version: %s\n",
241 				TW_OSL_DRIVER_VERSION_STRING);
242 			first_ctlr = 0;
243 		}
244 		return(0);
245 	}
246 	return(ENXIO);
247 }
248 
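/*
 * Helpers to register and disconnect the PCI interrupt handler.  Each is a
 * no-op unless the IRQ resource has been allocated; twa_teardown_intr()
 * clears intr_handle so that a later twa_setup_intr() can re-register.
 */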
249 int twa_setup_intr(struct twa_softc *sc)
250 {
251 	int error = 0;
252 
253 	if (!(sc->intr_handle) && (sc->irq_res)) {
254 		error = bus_setup_intr(sc->bus_dev, sc->irq_res,
255 					INTR_MPSAFE,
256 					twa_pci_intr,
257 					sc, &sc->intr_handle, NULL);
258 	}
259 	return( error );
260 }
261 
262 
263 int twa_teardown_intr(struct twa_softc *sc)
264 {
265 	int error = 0;
266 
267 	if ((sc->intr_handle) && (sc->irq_res)) {
268 		error = bus_teardown_intr(sc->bus_dev,
269 						sc->irq_res, sc->intr_handle);
270 		sc->intr_handle = NULL;
271 	}
272 	return( error );
273 }
274 
275 
276 
277 /*
278  * Function name:	twa_attach
279  * Description:		Allocates pci resources; updates sc; adds a node to the
280  *			sysctl tree to expose the driver version; makes calls
281  *			(to the Common Layer) to initialize ctlr, and to
282  *			attach to CAM.
283  *
284  * Input:		dev	-- bus device corresponding to the ctlr
285  * Output:		None
286  * Return value:	0	-- success
287  *			non-zero-- failure
288  */
289 static TW_INT32
290 twa_attach(device_t dev)
291 {
292 	struct twa_softc	*sc = device_get_softc(dev);
293 	TW_INT32		bar_num;
294 	TW_INT32		bar0_offset;
295 	TW_INT32		bar_size;
296 	TW_INT32		irq_flags;
297 	TW_INT32		error;
298 
299 	tw_osli_dbg_dprintf(3, sc, "entered");
300 
301 	sc->ctlr_handle.osl_ctlr_ctxt = sc;
302 
303 	/* Initialize the softc structure. */
304 	sc->bus_dev = dev;
305 	sc->device_id = pci_get_device(dev);
306 
307 	/* Initialize the mutexes right here. */
308 	sc->io_lock = &(sc->io_lock_handle);
309 	spin_init(sc->io_lock);
310 	sc->q_lock = &(sc->q_lock_handle);
311 	spin_init(sc->q_lock);
312 	sc->sim_lock = &(sc->sim_lock_handle);
313 	lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE);
314 
315 	sysctl_ctx_init(&sc->sysctl_ctxt);
316 	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
317 		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
318 		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
319 	if (sc->sysctl_tree == NULL) {
320 		tw_osli_printf(sc, "error = %d",
321 			TW_CL_SEVERITY_ERROR_STRING,
322 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
323 			0x2000,
324 			"Cannot add sysctl tree node",
325 			ENXIO);
326 		return(ENXIO);
327 	}
328 	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
329 		OID_AUTO, "driver_version", CTLFLAG_RD,
330 		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
331 
332 	/* Force the busmaster enable bit on, in case the BIOS forgot. */
333 	pci_enable_busmaster(dev);
334 
335 	/* Allocate the PCI register window. */
336 	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
337 		&bar_num, &bar0_offset, &bar_size))) {
338 		tw_osli_printf(sc, "error = %d",
339 			TW_CL_SEVERITY_ERROR_STRING,
340 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
341 			0x201F,
342 			"Can't get PCI BAR info",
343 			error);
344 		tw_osli_free_resources(sc);
345 		return(error);
346 	}
347 	sc->reg_res_id = PCIR_BARS + bar0_offset;
348 	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
349 				&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
350 				== NULL) {
351 		tw_osli_printf(sc, "error = %d",
352 			TW_CL_SEVERITY_ERROR_STRING,
353 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
354 			0x2002,
355 			"Can't allocate register window",
356 			ENXIO);
357 		tw_osli_free_resources(sc);
358 		return(ENXIO);
359 	}
360 	sc->bus_tag = rman_get_bustag(sc->reg_res);
361 	sc->bus_handle = rman_get_bushandle(sc->reg_res);
362 
363 	/* Allocate and register our interrupt. */
364 	sc->irq_res_id = 0;
365 	sc->irq_type = pci_alloc_1intr(sc->bus_dev, twa_msi_enable,
366 	    &sc->irq_res_id, &irq_flags);
367 	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
368 				&(sc->irq_res_id), 0, ~0, 1,
369 				irq_flags)) == NULL) {
370 		tw_osli_printf(sc, "error = %d",
371 			TW_CL_SEVERITY_ERROR_STRING,
372 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
373 			0x2003,
374 			"Can't allocate interrupt",
375 			ENXIO);
376 		tw_osli_free_resources(sc);
377 		return(ENXIO);
378 	}
379 	if ((error = twa_setup_intr(sc))) {
380 		tw_osli_printf(sc, "error = %d",
381 			TW_CL_SEVERITY_ERROR_STRING,
382 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
383 			0x2004,
384 			"Can't set up interrupt",
385 			error);
386 		tw_osli_free_resources(sc);
387 		return(error);
388 	}
389 
390 	if ((error = tw_osli_alloc_mem(sc))) {
391 		tw_osli_printf(sc, "error = %d",
392 			TW_CL_SEVERITY_ERROR_STRING,
393 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
394 			0x2005,
395 			"Memory allocation failure",
396 			error);
397 		tw_osli_free_resources(sc);
398 		return(error);
399 	}
400 
401 	/* Initialize the Common Layer for this controller. */
402 	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
403 			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
404 			sc->non_dma_mem, sc->dma_mem,
405 			sc->dma_mem_phys
406 			))) {
407 		tw_osli_printf(sc, "error = %d",
408 			TW_CL_SEVERITY_ERROR_STRING,
409 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
410 			0x2006,
411 			"Failed to initialize Common Layer/controller",
412 			error);
413 		tw_osli_free_resources(sc);
414 		return(error);
415 	}
416 
417 	/* Create the control device. */
418 	sc->ctrl_dev = make_dev(&twa_ops, device_get_unit(sc->bus_dev),
419 			UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
420 			"twa%d", device_get_unit(sc->bus_dev));
421 	sc->ctrl_dev->si_drv1 = sc;
422 
423 	if ((error = tw_osli_cam_attach(sc))) {
424 		tw_osli_free_resources(sc);
425 		tw_osli_printf(sc, "error = %d",
426 			TW_CL_SEVERITY_ERROR_STRING,
427 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
428 			0x2007,
429 			"Failed to initialize CAM",
430 			error);
431 		return(error);
432 	}
433 
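	/*
	 * Arm the watchdog.  Two callout structures are used alternately
	 * (selected by watchdog_index & 1 in twa_watchdog()), so the next
	 * period can be armed independently of the callout currently running.
	 */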
434 	sc->watchdog_index = 0;
435 	callout_init_mp(&(sc->watchdog_callout[0]));
436 	callout_init_mp(&(sc->watchdog_callout[1]));
437 	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);
438 
439 	return(0);
440 }
441 
442 
443 static TW_VOID
444 twa_watchdog(TW_VOID *arg)
445 {
446 	struct tw_cl_ctlr_handle *ctlr_handle =
447 		(struct tw_cl_ctlr_handle *)arg;
448 	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
449 	int				i;
450 	int				i_need_a_reset = 0;
451 	int				driver_is_active = 0;
452 	TW_UINT64			current_time;
453 	struct tw_osli_req_context	*my_req;
454 
455 
456 	/* Scan outstanding requests for expired deadlines. */
457 	current_time = (TW_UINT64) (tw_osl_get_local_time());
458 
459 	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
460 		my_req = &(sc->req_ctx_buf[i]);
461 
462 		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
463 			(my_req->deadline) &&
464 			(my_req->deadline < current_time)) {
465 			tw_cl_set_reset_needed(ctlr_handle);
466 #ifdef    TW_OSL_DEBUG
467 			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
468 #else  /* TW_OSL_DEBUG */
469 			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
470 #endif /* TW_OSL_DEBUG */
471 			break;
472 		}
473 	}
475 
476 	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);
477 
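	/* Alternate between the two watchdog callout structures. */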
478 	i = (int) ((sc->watchdog_index++) & 1);
479 
480 	driver_is_active = tw_cl_is_active(ctlr_handle);
481 
482 	if (i_need_a_reset) {
483 #ifdef    TW_OSL_DEBUG
484 		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
485 #endif /* TW_OSL_DEBUG */
486 		callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
487 		tw_cl_reset_ctlr(ctlr_handle);
488 #ifdef    TW_OSL_DEBUG
489 		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
490 #endif /* TW_OSL_DEBUG */
491 	} else if (driver_is_active) {
492 		callout_reset(&(sc->watchdog_callout[i]),  5*hz, twa_watchdog, &sc->ctlr_handle);
493 	}
494 #ifdef    TW_OSL_DEBUG
495 	if (i_need_a_reset)
496 		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
497 		"driver_is_active = %d\n",
498 		i_need_a_reset, driver_is_active);
499 #endif /* TW_OSL_DEBUG */
500 }
501 
502 
503 /*
504  * Function name:	tw_osli_alloc_mem
505  * Description:		Allocates memory needed both by CL and OSL.
506  *
507  * Input:		sc	-- OSL internal controller context
508  * Output:		None
509  * Return value:	0	-- success
510  *			non-zero-- failure
511  */
512 static TW_INT32
513 tw_osli_alloc_mem(struct twa_softc *sc)
514 {
515 	struct tw_osli_req_context	*req;
516 	TW_UINT32			max_sg_elements;
517 	TW_UINT32			non_dma_mem_size;
518 	TW_UINT32			dma_mem_size;
519 	TW_INT32			error;
520 	TW_INT32			i;
521 
522 	tw_osli_dbg_dprintf(3, sc, "entered");
523 
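	/*
	 * Advertise 64-bit addressing/SG-length capability to the Common
	 * Layer based on the width of the busdma types on this platform.
	 */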
524 	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
525 	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
526 
527 	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
528 		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
529 
530 	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
531 			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS,  TW_OSLI_MAX_NUM_AENS,
532 			&(sc->alignment), &(sc->sg_size_factor),
533 			&non_dma_mem_size, &dma_mem_size
534 			))) {
535 		tw_osli_printf(sc, "error = %d",
536 			TW_CL_SEVERITY_ERROR_STRING,
537 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
538 			0x2008,
539 			"Can't get Common Layer's memory requirements",
540 			error);
541 		return(error);
542 	}
543 
544 	sc->non_dma_mem = kmalloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
545 	    M_WAITOK);
546 
547 	/* Create the parent dma tag. */
548 	if (bus_dma_tag_create(NULL,			/* parent */
549 				sc->alignment,		/* alignment */
550 				TW_OSLI_DMA_BOUNDARY,	/* boundary */
551 				BUS_SPACE_MAXADDR,	/* lowaddr */
552 				BUS_SPACE_MAXADDR, 	/* highaddr */
553 				NULL, NULL, 		/* filter, filterarg */
554 				TW_CL_MAX_IO_SIZE,	/* maxsize */
555 				max_sg_elements,	/* nsegments */
556 				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
557 				0,			/* flags */
558 				&sc->parent_tag		/* tag */)) {
559 		tw_osli_printf(sc, "error = %d",
560 			TW_CL_SEVERITY_ERROR_STRING,
561 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
562 			0x200A,
563 			"Can't allocate parent DMA tag",
564 			ENOMEM);
565 		return(ENOMEM);
566 	}
567 
568 	/* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
569 	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
570 				sc->alignment,		/* alignment */
571 				0,			/* boundary */
572 				BUS_SPACE_MAXADDR,	/* lowaddr */
573 				BUS_SPACE_MAXADDR, 	/* highaddr */
574 				NULL, NULL, 		/* filter, filterarg */
575 				dma_mem_size,		/* maxsize */
576 				1,			/* nsegments */
577 				BUS_SPACE_MAXSIZE,	/* maxsegsize */
578 				0,			/* flags */
579 				&sc->cmd_tag		/* tag */)) {
580 		tw_osli_printf(sc, "error = %d",
581 			TW_CL_SEVERITY_ERROR_STRING,
582 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
583 			0x200B,
584 			"Can't allocate DMA tag for Common Layer's "
585 			"DMA'able memory",
586 			ENOMEM);
587 		return(ENOMEM);
588 	}
589 
590 	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
591 		BUS_DMA_NOWAIT, &sc->cmd_map)) {
592 		/* Try a second time. */
593 		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
594 			BUS_DMA_NOWAIT, &sc->cmd_map)) {
595 			tw_osli_printf(sc, "error = %d",
596 				TW_CL_SEVERITY_ERROR_STRING,
597 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
598 				0x200C,
599 				"Can't allocate DMA'able memory for the "
600 				"Common Layer",
601 				ENOMEM);
602 			return(ENOMEM);
603 		}
604 	}
605 
606 	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
607 		dma_mem_size, twa_map_load_callback,
608 		&sc->dma_mem_phys, 0);
609 
610 	/*
611 	 * Create a dma tag for data buffers; size will be the maximum
612 	 * possible I/O size (128kB).
613 	 */
614 	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
615 				sc->alignment,		/* alignment */
616 				0,			/* boundary */
617 				BUS_SPACE_MAXADDR,	/* lowaddr */
618 				BUS_SPACE_MAXADDR, 	/* highaddr */
619 				NULL, NULL, 		/* filter, filterarg */
620 				TW_CL_MAX_IO_SIZE,	/* maxsize */
621 				max_sg_elements,	/* nsegments */
622 				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
623 				BUS_DMA_ALLOCNOW,	/* flags */
624 				&sc->dma_tag		/* tag */)) {
625 		tw_osli_printf(sc, "error = %d",
626 			TW_CL_SEVERITY_ERROR_STRING,
627 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
628 			0x200F,
629 			"Can't allocate DMA tag for data buffers",
630 			ENOMEM);
631 		return(ENOMEM);
632 	}
633 
634 	/*
635 	 * Create a dma tag for ioctl data buffers; size will be the maximum
636 	 * possible I/O size (128kB).
637 	 */
638 	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
639 				sc->alignment,		/* alignment */
640 				0,			/* boundary */
641 				BUS_SPACE_MAXADDR,	/* lowaddr */
642 				BUS_SPACE_MAXADDR, 	/* highaddr */
643 				NULL, NULL, 		/* filter, filterarg */
644 				TW_CL_MAX_IO_SIZE,	/* maxsize */
645 				max_sg_elements,	/* nsegments */
646 				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
647 				BUS_DMA_ALLOCNOW,	/* flags */
648 				&sc->ioctl_tag		/* tag */)) {
649 		tw_osli_printf(sc, "error = %d",
650 			TW_CL_SEVERITY_ERROR_STRING,
651 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
652 			0x2010,
653 			"Can't allocate DMA tag for ioctl data buffers",
654 			ENOMEM);
655 		return(ENOMEM);
656 	}
657 
658 	/* Create just one map for all ioctl request data buffers. */
659 	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
660 		tw_osli_printf(sc, "error = %d",
661 			TW_CL_SEVERITY_ERROR_STRING,
662 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
663 			0x2011,
664 			"Can't create ioctl map",
665 			ENOMEM);
666 		return(ENOMEM);
667 	}
668 
669 
670 	/* Initialize request queues. */
671 	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
672 	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
673 
674 	sc->req_ctx_buf = kmalloc((sizeof(struct tw_osli_req_context) *
675 	    TW_OSLI_MAX_NUM_REQUESTS), TW_OSLI_MALLOC_CLASS,
676 	    M_WAITOK | M_ZERO);
677 	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
678 		req = &(sc->req_ctx_buf[i]);
679 		req->ctlr = sc;
680 		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
681 			tw_osli_printf(sc, "request # = %d, error = %d",
682 				TW_CL_SEVERITY_ERROR_STRING,
683 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
684 				0x2013,
685 				"Can't create dma map",
686 				i, ENOMEM);
687 			return(ENOMEM);
688 		}
689 
690 		/* Initialize the ioctl wakeup/timeout lock. */
691 		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
692 		lockinit(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", 0, 0);
693 
694 		/* Insert request into the free queue. */
695 		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
696 	}
697 
698 	return(0);
699 }
700 
701 
702 
703 /*
704  * Function name:	tw_osli_free_resources
705  * Description:		Performs clean-up at the time of going down.
706  *
707  * Input:		sc	-- ptr to OSL internal ctlr context
708  * Output:		None
709  * Return value:	None
710  */
711 static TW_VOID
712 tw_osli_free_resources(struct twa_softc *sc)
713 {
714 	struct tw_osli_req_context	*req;
715 	TW_INT32			error = 0;
716 
717 	tw_osli_dbg_dprintf(3, sc, "entered");
718 
719 	/* Detach from CAM */
720 	tw_osli_cam_detach(sc);
721 
722 	if (sc->req_ctx_buf)
723 		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
724 			NULL) {
725 			lockuninit(req->ioctl_wake_timeout_lock);
726 
727 			if ((error = bus_dmamap_destroy(sc->dma_tag,
728 					req->dma_map)))
729 				tw_osli_dbg_dprintf(1, sc,
730 					"dmamap_destroy(dma) returned %d",
731 					error);
732 		}
733 
734 	if ((sc->ioctl_tag) && (sc->ioctl_map))
735 		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
736 			tw_osli_dbg_dprintf(1, sc,
737 				"dmamap_destroy(ioctl) returned %d", error);
738 
739 	/* Free all memory allocated so far. */
740 	if (sc->req_ctx_buf)
741 		kfree(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);
742 
743 	if (sc->non_dma_mem)
744 		kfree(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
745 
746 	if (sc->dma_mem) {
747 		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
748 		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
749 			sc->cmd_map);
750 	}
751 	if (sc->cmd_tag)
752 		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
753 			tw_osli_dbg_dprintf(1, sc,
754 				"dma_tag_destroy(cmd) returned %d", error);
755 
756 	if (sc->dma_tag)
757 		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
758 			tw_osli_dbg_dprintf(1, sc,
759 				"dma_tag_destroy(dma) returned %d", error);
760 
761 	if (sc->ioctl_tag)
762 		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
763 			tw_osli_dbg_dprintf(1, sc,
764 				"dma_tag_destroy(ioctl) returned %d", error);
765 
766 	if (sc->parent_tag)
767 		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
768 			tw_osli_dbg_dprintf(1, sc,
769 				"dma_tag_destroy(parent) returned %d", error);
770 
771 
772 	/* Disconnect the interrupt handler. */
773 	if ((error = twa_teardown_intr(sc)))
774 			tw_osli_dbg_dprintf(1, sc,
775 				"teardown_intr returned %d", error);
776 
777 	if (sc->irq_res != NULL)
778 		if ((error = bus_release_resource(sc->bus_dev,
779 				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
780 			tw_osli_dbg_dprintf(1, sc,
781 				"release_resource(irq) returned %d", error);
782 
783 	if (sc->irq_type == PCI_INTR_TYPE_MSI)
784 		pci_release_msi(sc->bus_dev);
785 
786 	/* Release the register window mapping. */
787 	if (sc->reg_res != NULL)
788 		if ((error = bus_release_resource(sc->bus_dev,
789 				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
790 			tw_osli_dbg_dprintf(1, sc,
791 				"release_resource(io) returned %d", error);
792 
793 	/* Destroy the control device. */
794 	if (sc->ctrl_dev != NULL)
795 		destroy_dev(sc->ctrl_dev);
796 	dev_ops_remove_minor(&twa_ops, device_get_unit(sc->bus_dev));
797 
798 	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
799 		tw_osli_dbg_dprintf(1, sc,
800 			"sysctl_ctx_free returned %d", error);
801 
802 }
803 
804 
805 
806 /*
807  * Function name:	twa_detach
808  * Description:		Called when the controller is being detached from
809  *			the pci bus.
810  *
811  * Input:		dev	-- bus device corresponding to the ctlr
812  * Output:		None
813  * Return value:	0	-- success
814  *			non-zero-- failure
815  */
816 static TW_INT32
817 twa_detach(device_t dev)
818 {
819 	struct twa_softc	*sc = device_get_softc(dev);
820 	TW_INT32		error;
821 
822 	tw_osli_dbg_dprintf(3, sc, "entered");
823 
824 	error = EBUSY;
825 	if (sc->open) {
826 		tw_osli_printf(sc, "error = %d",
827 			TW_CL_SEVERITY_ERROR_STRING,
828 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
829 			0x2014,
830 			"Device open",
831 			error);
832 		goto out;
833 	}
834 
835 	/* Shut the controller down. */
836 	if ((error = twa_shutdown(dev)))
837 		goto out;
838 
839 	/* Free all resources associated with this controller. */
840 	tw_osli_free_resources(sc);
841 	error = 0;
842 
843 out:
844 	return(error);
845 }
846 
847 
848 
849 /*
850  * Function name:	twa_shutdown
851  * Description:		Called at unload/shutdown time.  Lets the controller
852  *			know that we are going down.
853  *
854  * Input:		dev	-- bus device corresponding to the ctlr
855  * Output:		None
856  * Return value:	0	-- success
857  *			non-zero-- failure
858  */
859 static TW_INT32
860 twa_shutdown(device_t dev)
861 {
862 	struct twa_softc	*sc = device_get_softc(dev);
863 	TW_INT32		error = 0;
864 
865 	tw_osli_dbg_dprintf(3, sc, "entered");
866 
867 	/* Disconnect interrupts. */
868 	error = twa_teardown_intr(sc);
869 
870 	/* Stop watchdog task. */
871 	callout_stop_sync(&(sc->watchdog_callout[0]));
872 	callout_stop_sync(&(sc->watchdog_callout[1]));
873 
874 	/* Disconnect from the controller. */
875 	if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
876 		tw_osli_printf(sc, "error = %d",
877 			TW_CL_SEVERITY_ERROR_STRING,
878 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
879 			0x2015,
880 			"Failed to shutdown Common Layer/controller",
881 			error);
882 	}
883 	return(error);
884 }
885 
886 
887 
888 /*
889  * Function name:	twa_pci_intr
890  * Description:		Interrupt handler.  Simply hands off to tw_cl_interrupt().
891  *
892  * Input:		arg	-- ptr to OSL internal ctlr context
893  * Output:		None
894  * Return value:	None
895  */
896 static TW_VOID
897 twa_pci_intr(TW_VOID *arg)
898 {
899 	struct twa_softc	*sc = (struct twa_softc *)arg;
900 
901 	tw_osli_dbg_dprintf(10, sc, "entered");
902 	tw_cl_interrupt(&(sc->ctlr_handle));
903 }
904 
905 
906 /*
907  * Function name:	tw_osli_fw_passthru
908  * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
909  *
910  * Input:		sc	-- ptr to OSL internal ctlr context
911  *			buf	-- ptr to ioctl pkt understood by CL
912  * Output:		None
913  * Return value:	0	-- success
914  *			non-zero-- failure
915  */
916 TW_INT32
917 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
918 {
919 	struct tw_osli_req_context		*req;
920 	struct tw_osli_ioctl_no_data_buf	*user_buf =
921 		(struct tw_osli_ioctl_no_data_buf *)buf;
922 	TW_TIME					end_time;
923 	TW_UINT32				timeout = 60;
924 	TW_UINT32				data_buf_size_adjusted;
925 	struct tw_cl_req_packet			*req_pkt;
926 	struct tw_cl_passthru_req_packet	*pt_req;
927 	TW_INT32				error;
928 
929 	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
930 
931 	if ((req = tw_osli_get_request(sc)) == NULL)
932 		return(EBUSY);
933 
934 	req->req_handle.osl_req_ctxt = req;
935 	req->orig_req = buf;
936 	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
937 
938 	req_pkt = &(req->req_pkt);
939 	req_pkt->status = 0;
940 	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
941 	/* Let the Common Layer retry the request on cmd queue full. */
942 	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
943 
944 	pt_req = &(req_pkt->gen_req_pkt.pt_req);
945 	/*
946 	 * Make sure that the data buffer sent to firmware is a multiple
947 	 * of sc->sg_size_factor (512 bytes) in size.
948 	 */
949 	data_buf_size_adjusted =
950 		(user_buf->driver_pkt.buffer_length +
951 		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
952 	if ((req->length = data_buf_size_adjusted)) {
953 		req->data = kmalloc(data_buf_size_adjusted,
954 		    TW_OSLI_MALLOC_CLASS, M_WAITOK);
955 		/* Copy the payload. */
956 		if ((error = copyin((TW_VOID *)(user_buf->pdata),
957 			req->data,
958 			user_buf->driver_pkt.buffer_length)) != 0) {
959 			tw_osli_printf(sc, "error = %d",
960 				TW_CL_SEVERITY_ERROR_STRING,
961 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
962 				0x2017,
963 				"Could not copyin fw_passthru data_buf",
964 				error);
965 			goto fw_passthru_err;
966 		}
967 		pt_req->sgl_entries = 1; /* will be updated during mapping */
968 		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
969 			TW_OSLI_REQ_FLAGS_DATA_OUT);
970 	} else
971 		pt_req->sgl_entries = 0; /* no payload */
972 
973 	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
974 	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
975 
976 	if ((error = tw_osli_map_request(req)))
977 		goto fw_passthru_err;
978 
979 	end_time = tw_osl_get_local_time() + timeout;
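	/*
	 * Wait for the request to complete.  tw_osl_complete_passthru() clears
	 * TW_OSLI_REQ_FLAGS_SLEEPING and wakes us up; finding the flag clear
	 * after lksleep() returns therefore means the request completed rather
	 * than timing out or being interrupted.
	 */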
980 	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
981 		lockmgr(req->ioctl_wake_timeout_lock, LK_EXCLUSIVE);
982 		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
983 
984 		error = lksleep(req, req->ioctl_wake_timeout_lock, 0,
985 			    "twa_passthru", timeout*hz);
986 		lockmgr(req->ioctl_wake_timeout_lock, LK_RELEASE);
987 
988 		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
989 			error = 0;
990 		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
991 
992 		if (! error) {
993 			if (((error = req->error_code)) ||
994 				((error = (req->state !=
995 				TW_OSLI_REQ_STATE_COMPLETE))) ||
996 				((error = req_pkt->status)))
997 				goto fw_passthru_err;
998 			break;
999 		}
1000 
1001 		if (req_pkt->status) {
1002 			error = req_pkt->status;
1003 			goto fw_passthru_err;
1004 		}
1005 
1006 		if (error == EWOULDBLOCK) {
1007 			/* Time out! */
1008 			if ((!(req->error_code))                       &&
1009 			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
1010 			    (!(req_pkt->status))			  ) {
1011 #ifdef    TW_OSL_DEBUG
1012 				tw_osli_printf(sc, "request = %p",
1013 					TW_CL_SEVERITY_ERROR_STRING,
1014 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1015 					0x7777,
1016 					"FALSE Passthru timeout!",
1017 					req);
1018 #endif /* TW_OSL_DEBUG */
1019 				error = 0; /* False error */
1020 				break;
1021 			}
1022 			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
1023 #ifdef    TW_OSL_DEBUG
1024 				tw_osli_printf(sc, "request = %p",
1025 					TW_CL_SEVERITY_ERROR_STRING,
1026 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1027 					0x2018,
1028 					"Passthru request timed out!",
1029 					req);
1030 #else  /* TW_OSL_DEBUG */
1031 				device_printf((sc)->bus_dev, "Passthru request timed out!\n");
1032 #endif /* TW_OSL_DEBUG */
1033 				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
1034 			}
1035 
1036 			error = 0;
1037 			end_time = tw_osl_get_local_time() + timeout;
1038 			continue;
1039 			/*
1040 			 * Don't touch req after a reset.  It (and any
1041 			 * associated data) will be
1042 			 * unmapped by the callback.
1043 			 */
1044 		}
1045 		/*
1046 		 * Either the request got completed, or we were woken up by a
1047 		 * signal.  Calculate the new timeout, in case it was the latter.
1048 		 */
1049 		timeout = (end_time - tw_osl_get_local_time());
1050 	} /* End of while loop */
1051 
1052 	/* If there was a payload, copy it back. */
1053 	if ((!error) && (req->length))
1054 		if ((error = copyout(req->data, user_buf->pdata,
1055 			user_buf->driver_pkt.buffer_length)))
1056 			tw_osli_printf(sc, "error = %d",
1057 				TW_CL_SEVERITY_ERROR_STRING,
1058 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1059 				0x2019,
1060 				"Could not copyout fw_passthru data_buf",
1061 				error);
1062 
1063 fw_passthru_err:
1064 
1065 	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1066 		error = EBUSY;
1067 
1068 	user_buf->driver_pkt.os_status = error;
1069 	/* Free resources. */
1070 	if (req->data)
1071 		kfree(req->data, TW_OSLI_MALLOC_CLASS);
1072 	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1073 	return(error);
1074 }
1075 
1076 
1077 
1078 /*
1079  * Function name:	tw_osl_complete_passthru
1080  * Description:		Called to complete passthru requests.
1081  *
1082  * Input:		req_handle	-- ptr to request handle
1083  * Output:		None
1084  * Return value:	None
1085  */
1086 TW_VOID
1087 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1088 {
1089 	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
1090 	struct tw_cl_req_packet		*req_pkt =
1091 		(struct tw_cl_req_packet *)(&req->req_pkt);
1092 	struct twa_softc		*sc = req->ctlr;
1093 
1094 	tw_osli_dbg_dprintf(5, sc, "entered");
1095 
1096 	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1097 		tw_osli_printf(sc, "request = %p, status = %d",
1098 			TW_CL_SEVERITY_ERROR_STRING,
1099 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1100 			0x201B,
1101 			"Unposted command completed!!",
1102 			req, req->state);
1103 	}
1104 
1105 	/*
1106 	 * Remove request from the busy queue.  Just mark it complete.
1107 	 * There's no need to move it into the complete queue as we are
1108 	 * going to be done with it right now.
1109 	 */
1110 	req->state = TW_OSLI_REQ_STATE_COMPLETE;
1111 	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1112 
1113 	tw_osli_unmap_request(req);
1114 
1115 	/*
1116 	 * Don't do a wake up if there was an error even before the request
1117 	 * was sent down to the Common Layer, and we hadn't gotten an
1118 	 * EINPROGRESS.  The request originator will then be returned an
1119 	 * error, and it can do the clean-up.
1120 	 */
1121 	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1122 		return;
1123 
1124 	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1125 		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1126 			/* Wake up the sleeping command originator. */
1127 			tw_osli_dbg_dprintf(5, sc,
1128 				"Waking up originator of request %p", req);
1129 			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1130 			wakeup_one(req);
1131 		} else {
1132 			/*
1133 			 * If the request completed even before lksleep()
1134 			 * was called, simply return.
1135 			 */
1136 			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1137 				return;
1138 
1139 			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1140 				return;
1141 
1142 			tw_osli_printf(sc, "request = %p",
1143 				TW_CL_SEVERITY_ERROR_STRING,
1144 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1145 				0x201C,
1146 				"Passthru callback called, "
1147 				"and caller not sleeping",
1148 				req);
1149 		}
1150 	} else {
1151 		tw_osli_printf(sc, "request = %p",
1152 			TW_CL_SEVERITY_ERROR_STRING,
1153 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1154 			0x201D,
1155 			"Passthru callback called for non-passthru request",
1156 			req);
1157 	}
1158 }
1159 
1160 
1161 
1162 /*
1163  * Function name:	tw_osli_get_request
1164  * Description:		Gets a request pkt from the free queue.
1165  *
1166  * Input:		sc	-- ptr to OSL internal ctlr context
1167  * Output:		None
1168  * Return value:	ptr to request pkt	-- success
1169  *			NULL			-- failure
1170  */
1171 struct tw_osli_req_context *
1172 tw_osli_get_request(struct twa_softc *sc)
1173 {
1174 	struct tw_osli_req_context	*req;
1175 
1176 	tw_osli_dbg_dprintf(4, sc, "entered");
1177 
1178 	/* Get a free request packet. */
1179 	req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1180 
1181 	/* Initialize some fields to their defaults. */
1182 	if (req) {
1183 		req->req_handle.osl_req_ctxt = NULL;
1184 		req->req_handle.cl_req_ctxt = NULL;
1185 		req->req_handle.is_io = 0;
1186 		req->data = NULL;
1187 		req->length = 0;
1188 		req->deadline = 0;
1189 		req->real_data = NULL;
1190 		req->real_length = 0;
1191 		req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1192 		req->flags = 0;
1193 		req->error_code = 0;
1194 		req->orig_req = NULL;
1195 
1196 		bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1197 
1198 	}
1199 	return(req);
1200 }
1201 
1202 
1203 
1204 /*
1205  * Function name:	twa_map_load_data_callback
1206  * Description:		Callback of bus_dmamap_load for the buffer associated
1207  *			with data.  Updates the cmd pkt (size/sgl_entries
1208  *			fields, as applicable) to reflect the number of sg
1209  *			elements.
1210  *
1211  * Input:		arg	-- ptr to OSL internal request context
1212  *			segs	-- ptr to a list of segment descriptors
1213  *			nsegments--# of segments
1214  *			error	-- 0 if no errors encountered before callback,
1215  *				   non-zero if errors were encountered
1216  * Output:		None
1217  * Return value:	None
1218  */
1219 static TW_VOID
1220 twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1221 	TW_INT32 nsegments, TW_INT32 error)
1222 {
1223 	struct tw_osli_req_context	*req =
1224 		(struct tw_osli_req_context *)arg;
1225 	struct twa_softc		*sc = req->ctlr;
1226 	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);
1227 
1228 	tw_osli_dbg_dprintf(10, sc, "entered");
1229 
1230 	if (error == EINVAL) {
1231 		req->error_code = error;
1232 		return;
1233 	}
1234 
1235 	/* Mark the request as currently being processed. */
1236 	req->state = TW_OSLI_REQ_STATE_BUSY;
1237 	/* Move the request into the busy queue. */
1238 	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1239 
1240 	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
1241 
1242 	if (error == EFBIG) {
1243 		req->error_code = error;
1244 		goto out;
1245 	}
1246 
1247 	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1248 		struct tw_cl_passthru_req_packet	*pt_req;
1249 
1250 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1251 			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1252 				BUS_DMASYNC_PREREAD);
1253 
1254 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1255 			/*
1256 			 * If we're using an alignment buffer, and we're
1257 			 * writing data, copy the real data out.
1258 			 */
1259 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1260 				bcopy(req->real_data, req->data, req->real_length);
1261 			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1262 				BUS_DMASYNC_PREWRITE);
1263 		}
1264 
1265 		pt_req = &(req_pkt->gen_req_pkt.pt_req);
1266 		pt_req->sg_list = (TW_UINT8 *)segs;
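		/* sgl_entries was pre-set to 1; fix up to the real count. */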
1267 		pt_req->sgl_entries += (nsegments - 1);
1268 		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
1269 			&(req->req_handle));
1270 	} else {
1271 		struct tw_cl_scsi_req_packet	*scsi_req;
1272 
1273 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1274 			bus_dmamap_sync(sc->dma_tag, req->dma_map,
1275 				BUS_DMASYNC_PREREAD);
1276 
1277 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1278 			/*
1279 			 * If we're using an alignment buffer, and we're
1280 			 * writing data, copy the real data out.
1281 			 */
1282 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1283 				bcopy(req->real_data, req->data, req->real_length);
1284 			bus_dmamap_sync(sc->dma_tag, req->dma_map,
1285 				BUS_DMASYNC_PREWRITE);
1286 		}
1287 
1288 		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
1289 		scsi_req->sg_list = (TW_UINT8 *)segs;
1290 		scsi_req->sgl_entries += (nsegments - 1);
1291 		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
1292 			&(req->req_handle));
1293 	}
1294 
1295 out:
1296 	if (error) {
1297 		req->error_code = error;
1298 		req_pkt->tw_osl_callback(&(req->req_handle));
1299 		/*
1300 		 * If the caller had been returned EINPROGRESS, and it has
1301 		 * registered a callback for handling completion, the callback
1302 		 * will never get called because we were unable to submit the
1303 		 * request.  So, free up the request right here.
1304 		 */
1305 		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1306 			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1307 	}
1308 }
1309 
1310 
1311 
1312 /*
1313  * Function name:	twa_map_load_callback
1314  * Description:		Callback of bus_dmamap_load for the buffer associated
1315  *			with a cmd pkt.
1316  *
1317  * Input:		arg	-- ptr to variable to hold phys addr
1318  *			segs	-- ptr to a list of segment descriptors
1319  *			nsegments--# of segments
1320  *			error	-- 0 if no errors encountered before callback,
1321  *				   non-zero if errors were encountered
1322  * Output:		None
1323  * Return value:	None
1324  */
1325 static TW_VOID
1326 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1327 	TW_INT32 nsegments, TW_INT32 error)
1328 {
1329 	*((bus_addr_t *)arg) = segs[0].ds_addr;
1330 }
1331 
1332 
1333 
1334 /*
1335  * Function name:	tw_osli_map_request
1336  * Description:		Maps a cmd pkt and data associated with it, into
1337  *			DMA'able memory.
1338  *
1339  * Input:		req	-- ptr to request pkt
1340  * Output:		None
1341  * Return value:	0	-- success
1342  *			non-zero-- failure
1343  */
1344 TW_INT32
1345 tw_osli_map_request(struct tw_osli_req_context *req)
1346 {
1347 	struct twa_softc	*sc = req->ctlr;
1348 	TW_INT32		error = 0;
1349 
1350 	tw_osli_dbg_dprintf(10, sc, "entered");
1351 
1352 	/* If the command involves data, map that too. */
1353 	if (req->data != NULL) {
1354 		/*
1355 		 * It's sufficient for the data pointer to be 4-byte aligned
1356 		 * to work with 9000.  However, if 4-byte aligned addresses
1357 		 * are passed to bus_dmamap_load, we can get back sg elements
1358 		 * that are not 512-byte multiples in size.  So, we will let
1359 		 * only those buffers that are 512-byte aligned to pass
1360 		 * through, and bounce the rest, so as to make sure that we
1361 		 * always get back sg elements that are 512-byte multiples
1362 		 * in size.
1363 		 */
1364 		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
1365 			(req->length % sc->sg_size_factor)) {
1366 			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
1367 			/* Save original data pointer and length. */
1368 			req->real_data = req->data;
1369 			req->real_length = req->length;
1370 			req->length = (req->length +
1371 				(sc->sg_size_factor - 1)) &
1372 				~(sc->sg_size_factor - 1);
1373 			req->data = kmalloc(req->length, TW_OSLI_MALLOC_CLASS,
1374 					M_NOWAIT);
1375 			if (req->data == NULL) {
1376 				tw_osli_printf(sc, "error = %d",
1377 					TW_CL_SEVERITY_ERROR_STRING,
1378 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1379 					0x201E,
1380 					"Failed to allocate memory "
1381 					"for bounce buffer",
1382 					ENOMEM);
1383 				/* Restore original data pointer and length. */
1384 				req->data = req->real_data;
1385 				req->length = req->real_length;
1386 				return(ENOMEM);
1387 			}
1388 		}
1389 
1390 		/*
1391 		 * Map the data buffer into bus space and build the SG list.
1392 		 */
1393 		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1394 			/* Lock against multiple simultaneous ioctl calls. */
1395 			spin_lock(sc->io_lock);
1396 			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
1397 				req->data, req->length,
1398 				twa_map_load_data_callback, req,
1399 				BUS_DMA_WAITOK);
1400 			spin_unlock(sc->io_lock);
1401 		} else {
1402 			/*
1403 			 * There's only one CAM I/O thread running at a time.
1404 			 * So, there's no need to hold the io_lock.
1405 			 */
1406 			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
1407 				req->data, req->length,
1408 				twa_map_load_data_callback, req,
1409 				BUS_DMA_WAITOK);
1410 		}
1411 
1412 		if (!error)
1413 			error = req->error_code;
1414 		else {
1415 			if (error == EINPROGRESS) {
1416 				/*
1417 				 * Specifying sc->io_lock as the lockfuncarg
1418 				 * in ...tag_create should protect the access
1419 				 * of ...FLAGS_MAPPED from the callback.
1420 				 */
1421 				spin_lock(sc->io_lock);
1422 				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
1423 					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
1424 				tw_osli_disallow_new_requests(sc, &(req->req_handle));
1425 				spin_unlock(sc->io_lock);
1426 				error = 0;
1427 			} else {
1428 				tw_osli_printf(sc, "error = %d",
1429 					TW_CL_SEVERITY_ERROR_STRING,
1430 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1431 					0x9999,
1432 					"Failed to map DMA memory "
1433 					"for I/O request",
1434 					error);
1435 				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
1436 				/* Free alignment buffer if it was used. */
1437 				if (req->flags &
1438 					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1439 					kfree(req->data, TW_OSLI_MALLOC_CLASS);
1440 					/*
1441 					 * Restore original data pointer
1442 					 * and length.
1443 					 */
1444 					req->data = req->real_data;
1445 					req->length = req->real_length;
1446 				}
1447 			}
1448 		}
1449 
1450 	} else {
1451 		/* Mark the request as currently being processed. */
1452 		req->state = TW_OSLI_REQ_STATE_BUSY;
1453 		/* Move the request into the busy queue. */
1454 		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1455 		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
1456 			error = tw_cl_fw_passthru(&sc->ctlr_handle,
1457 					&(req->req_pkt), &(req->req_handle));
1458 		else
1459 			error = tw_cl_start_io(&sc->ctlr_handle,
1460 					&(req->req_pkt), &(req->req_handle));
1461 		if (error) {
1462 			req->error_code = error;
1463 			req->req_pkt.tw_osl_callback(&(req->req_handle));
1464 		}
1465 	}
1466 	return(error);
1467 }
1468 
1469 
1470 
1471 /*
1472  * Function name:	tw_osli_unmap_request
1473  * Description:		Undoes the mapping done by tw_osli_map_request.
1474  *
1475  * Input:		req	-- ptr to request pkt
1476  * Output:		None
1477  * Return value:	None
1478  */
1479 TW_VOID
1480 tw_osli_unmap_request(struct tw_osli_req_context *req)
1481 {
1482 	struct twa_softc	*sc = req->ctlr;
1483 
1484 	tw_osli_dbg_dprintf(10, sc, "entered");
1485 
1486 	/* If the command involved data, unmap that too. */
1487 	if (req->data != NULL) {
1488 		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1489 			/* Lock against multiple simultaneous ioctl calls. */
1490 			spin_lock(sc->io_lock);
1491 
1492 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1493 				bus_dmamap_sync(sc->ioctl_tag,
1494 					sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1495 
1496 				/*
1497 				 * If we are using a bounce buffer, and we are
1498 				 * reading data, copy the real data in.
1499 				 */
1500 				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1501 					bcopy(req->data, req->real_data,
1502 						req->real_length);
1503 			}
1504 
1505 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1506 				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1507 					BUS_DMASYNC_POSTWRITE);
1508 
1509 			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1510 
1511 			spin_unlock(sc->io_lock);
1512 		} else {
1513 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1514 				bus_dmamap_sync(sc->dma_tag,
1515 					req->dma_map, BUS_DMASYNC_POSTREAD);
1516 
1517 				/*
1518 				 * If we are using a bounce buffer, and we are
1519 				 * reading data, copy the real data in.
1520 				 */
1521 				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1522 					bcopy(req->data, req->real_data,
1523 						req->real_length);
1524 			}
1525 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1526 				bus_dmamap_sync(sc->dma_tag, req->dma_map,
1527 					BUS_DMASYNC_POSTWRITE);
1528 
1529 			bus_dmamap_unload(sc->dma_tag, req->dma_map);
1530 		}
1531 	}
1532 
1533 	/* Free alignment buffer if it was used. */
1534 	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1535 		kfree(req->data, TW_OSLI_MALLOC_CLASS);
1536 		/* Restore original data pointer and length. */
1537 		req->data = req->real_data;
1538 		req->length = req->real_length;
1539 	}
1540 }
1541 
1542 
1543 
1544 #ifdef TW_OSL_DEBUG
1545 
1546 TW_VOID	twa_report_stats(TW_VOID);
1547 TW_VOID	twa_reset_stats(TW_VOID);
1548 TW_VOID	tw_osli_print_ctlr_stats(struct twa_softc *sc);
1549 TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1550 
1551 
1552 /*
1553  * Function name:	twa_report_stats
1554  * Description:		Meant to be called from ddb.  Calls functions that print
1555  *			OSL and CL internal stats for the controller.
1556  *
1557  * Input:		None
1558  * Output:		None
1559  * Return value:	None
1560  */
1561 TW_VOID
1562 twa_report_stats(TW_VOID)
1563 {
1564 	struct twa_softc	*sc;
1565 	TW_INT32		i;
1566 
1567 	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1568 		tw_osli_print_ctlr_stats(sc);
1569 		tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1570 	}
1571 }
1572 
1573 
1574 
1575 /*
1576  * Function name:	tw_osli_print_ctlr_stats
1577  * Description:		Meant to be called from ddb.  Prints OSL controller stats.
1578  *
1579  * Input:		sc	-- ptr to OSL internal controller context
1580  * Output:		None
1581  * Return value:	None
1582  */
1583 TW_VOID
1584 tw_osli_print_ctlr_stats(struct twa_softc *sc)
1585 {
1586 	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
1587 	twa_printf(sc, "OSLq type  current  max\n");
1588 	twa_printf(sc, "free      %04d     %04d\n",
1589 		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
1590 		sc->q_stats[TW_OSLI_FREE_Q].max_len);
1591 	twa_printf(sc, "busy      %04d     %04d\n",
1592 		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
1593 		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
1594 }
1595 
1596 
1597 
1598 /*
1599  * Function name:	twa_print_req_info
1600  * Description:		Meant to be called from ddb.  Calls functions that print
1601  *			OSL and CL internal details for the request.
1602  *
1603  * Input:		req	-- ptr to OSL internal request context
1604  * Output:		None
1605  * Return value:	None
1606  */
1607 TW_VOID
1608 twa_print_req_info(struct tw_osli_req_context *req)
1609 {
1610 	struct twa_softc	*sc = req->ctlr;
1611 
1612 	twa_printf(sc, "OSL details for request:\n");
1613 	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
1614 		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
1615 		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
1616 		"next_req = %p, prev_req = %p, dma_map = %p\n",
1617 		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
1618 		req->data, req->length, req->real_data, req->real_length,
1619 		req->state, req->flags, req->error_code, req->orig_req,
1620 		req->link.next, req->link.prev, req->dma_map);
1621 	tw_cl_print_req_info(&(req->req_handle));
1622 }
1623 
1624 
1625 
1626 /*
1627  * Function name:	twa_reset_stats
1628  * Description:		Meant to be called from ddb.
1629  *			Resets some OSL controller stats.
1630  *
1631  * Input:		None
1632  * Output:		None
1633  * Return value:	None
1634  */
1635 TW_VOID
1636 twa_reset_stats(TW_VOID)
1637 {
1638 	struct twa_softc	*sc;
1639 	TW_INT32		i;
1640 
1641 	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1642 		sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1643 		sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1644 		tw_cl_reset_stats(&sc->ctlr_handle);
1645 	}
1646 }
1647 
1648 #endif /* TW_OSL_DEBUG */
1649