xref: /dragonfly/sys/dev/raid/twa/tw_osl_freebsd.c (revision 03517d4e)
1 /*
2  * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3  * Copyright (c) 2004-05 Vinod Kashyap.
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2000 BSDi
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	$FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 254263 2013-08-12 23:30:01Z scottl $
30  */
31 
32 /*
33  * AMCC's 3ware driver for 9000 series storage controllers.
34  *
35  * Author: Vinod Kashyap
36  * Modifications by: Adam Radford
37  * Modifications by: Manjunath Ranganathaiah
38  */
39 
40 
41 /*
42  * FreeBSD-specific functions not related to CAM, and other
43  * miscellaneous functions.
44  */
45 
46 
47 #include <dev/raid/twa/tw_osl_includes.h>
48 #include <dev/raid/twa/tw_cl_fwif.h>
49 #include <dev/raid/twa/tw_cl_ioctl.h>
50 #include <dev/raid/twa/tw_osl_ioctl.h>
51 
52 #ifdef TW_OSL_DEBUG
53 TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
54 TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
55 #endif /* TW_OSL_DEBUG */
56 
57 static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
58 
59 
60 static	d_open_t		twa_open;
61 static	d_close_t		twa_close;
62 static	d_ioctl_t		twa_ioctl;
63 
64 static struct dev_ops twa_ops = {
65 	{ "twa", 0, D_MPSAFE },
66 	.d_open =	twa_open,
67 	.d_close =	twa_close,
68 	.d_ioctl =	twa_ioctl,
69 };
70 
71 static devclass_t	twa_devclass;
72 
73 static int		twa_msi_enable = 0;
74 TUNABLE_INT("hw.twa.msi.enable", &twa_msi_enable);
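
/*
 * Usage sketch: hw.twa.msi.enable is a loader tunable, so MSI can be
 * requested before the driver attaches by adding the following line to
 * /boot/loader.conf.  With the default of 0, pci_alloc_1intr() in
 * twa_attach() is expected to fall back to a legacy INTx interrupt.
 *
 *	hw.twa.msi.enable="1"
 */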
75 
76 
77 /*
78  * Function name:	twa_open
79  * Description:		Called when the controller is opened.
80  *			Simply marks the controller as open.
81  *
82  * Input:		dev	-- control device corresponding to the ctlr
83  *			flags	-- mode of open
84  *			fmt	-- device type (character/block etc.)
85  *			proc	-- current process
86  * Output:		None
87  * Return value:	0	-- success
88  *			non-zero-- failure
89  */
90 static TW_INT32
91 twa_open(struct dev_open_args *ap)
92 {
93 	cdev_t			dev = ap->a_head.a_dev;
94 	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
95 
96 	tw_osli_dbg_dprintf(5, sc, "entered");
97 	sc->open = TW_CL_TRUE;
98 	return(0);
99 }
100 
101 
102 
103 /*
104  * Function name:	twa_close
105  * Description:		Called when the controller is closed.
106  *			Simply marks the controller as not open.
107  *
108  * Input:		dev	-- control device corresponding to the ctlr
109  *			flags	-- mode of corresponding open
110  *			fmt	-- device type (character/block etc.)
111  *			proc	-- current process
112  * Output:		None
113  * Return value:	0	-- success
114  *			non-zero-- failure
115  */
116 static TW_INT32
117 twa_close(struct dev_close_args *ap)
118 {
119 	cdev_t			dev = ap->a_head.a_dev;
120 	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
121 
122 	tw_osli_dbg_dprintf(5, sc, "entered");
123 	sc->open = TW_CL_FALSE;
124 	return(0);
125 }
126 
127 
128 
129 /*
130  * Function name:	twa_ioctl
131  * Description:		Called when an ioctl is posted to the controller.
132  *			Handles any OS Layer specific cmds, passes the rest
133  *			on to the Common Layer.
134  *
135  * Input:		dev	-- control device corresponding to the ctlr
136  *			cmd	-- ioctl cmd
137  *			buf	-- ptr to buffer in kernel memory, which is
138  *				   a copy of the input buffer in user-space
139  *			flags	-- mode of corresponding open
140  *			proc	-- current process
141  * Output:		buf	-- ptr to buffer in kernel memory, which will
142  *				   be copied to the output buffer in user-space
143  * Return value:	0	-- success
144  *			non-zero-- failure
145  */
146 static TW_INT32
147 twa_ioctl(struct dev_ioctl_args *ap)
148 {
149 	cdev_t			dev = ap->a_head.a_dev;
150 	u_long			cmd = ap->a_cmd;
151 	caddr_t			buf = ap->a_data;
152 	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
153 	TW_INT32		error;
154 
155 	tw_osli_dbg_dprintf(5, sc, "entered");
156 
157 	switch (cmd) {
158 	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
159 		tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
160 		error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
161 		break;
162 
163 	case TW_OSL_IOCTL_SCAN_BUS:
164 		/* Request a bus scan from CAM. */
165 		tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
166 		error = tw_osli_request_bus_scan(sc);
167 		break;
168 
169 	default:
170 		tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
171 		error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
172 		break;
173 	}
174 	return(error);
175 }
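
/*
 * Usage sketch (illustrative only): the control node created in twa_attach()
 * as "twa%d" can be opened from userland and handed one of the ioctls above.
 * A hypothetical bus rescan might look roughly like this; the data argument
 * is ignored by the TW_OSL_IOCTL_SCAN_BUS handler.
 *
 *	int	fd = open("/dev/twa0", O_RDWR);
 *	char	buf[512];	(sized per the ioctl encoding; contents unused)
 *
 *	if (fd >= 0) {
 *		ioctl(fd, TW_OSL_IOCTL_SCAN_BUS, buf);
 *		close(fd);
 *	}
 */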
176 
177 
178 
179 static TW_INT32	twa_probe(device_t dev);
180 static TW_INT32	twa_attach(device_t dev);
181 static TW_INT32	twa_detach(device_t dev);
182 static TW_INT32	twa_shutdown(device_t dev);
183 static TW_VOID	twa_pci_intr(TW_VOID *arg);
184 static TW_VOID	twa_watchdog(TW_VOID *arg);
185 int twa_setup_intr(struct twa_softc *sc);
186 int twa_teardown_intr(struct twa_softc *sc);
187 
188 static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
189 static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);
190 
191 static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
192 	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
193 static TW_VOID	twa_map_load_callback(TW_VOID *arg,
194 	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
195 
196 
197 static device_method_t	twa_methods[] = {
198 	/* Device interface */
199 	DEVMETHOD(device_probe,		twa_probe),
200 	DEVMETHOD(device_attach,	twa_attach),
201 	DEVMETHOD(device_detach,	twa_detach),
202 	DEVMETHOD(device_shutdown,	twa_shutdown),
203 
204 	DEVMETHOD_END
205 };
206 
207 static driver_t	twa_pci_driver = {
208 	"twa",
209 	twa_methods,
210 	sizeof(struct twa_softc)
211 };
212 
213 DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, NULL, NULL);
214 MODULE_DEPEND(twa, cam, 1, 1, 1);
215 MODULE_DEPEND(twa, pci, 1, 1, 1);
216 MODULE_VERSION(twa, 1);
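
/*
 * The glue above registers the driver with the PCI bus, records its
 * dependencies on the cam and pci code, and exports a module version so the
 * driver can be built and loaded as a kernel module (e.g. "kldload twa").
 */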
217 
218 
219 /*
220  * Function name:	twa_probe
221  * Description:		Called at driver load time.  Claims 9000 ctlrs.
222  *
223  * Input:		dev	-- bus device corresponding to the ctlr
224  * Output:		None
225  * Return value:	<= 0	-- success
226  *			> 0	-- failure
227  */
228 static TW_INT32
229 twa_probe(device_t dev)
230 {
231 	static TW_UINT8	first_ctlr = 1;
232 
233 	tw_osli_dbg_printf(3, "entered");
234 
235 	if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
236 		device_set_desc(dev, TW_OSLI_DEVICE_NAME);
237 		/* Print the driver version only once. */
238 		if (first_ctlr) {
239 			kprintf("3ware device driver for 9000 series storage "
240 				"controllers, version: %s\n",
241 				TW_OSL_DRIVER_VERSION_STRING);
242 			first_ctlr = 0;
243 		}
244 		return(0);
245 	}
246 	return(ENXIO);
247 }
248 
249 int twa_setup_intr(struct twa_softc *sc)
250 {
251 	int error = 0;
252 
253 	if (!(sc->intr_handle) && (sc->irq_res)) {
254 		error = bus_setup_intr(sc->bus_dev, sc->irq_res,
255 					INTR_MPSAFE,
256 					twa_pci_intr,
257 					sc, &sc->intr_handle, NULL);
258 	}
259 	return( error );
260 }
261 
262 
263 int twa_teardown_intr(struct twa_softc *sc)
264 {
265 	int error = 0;
266 
267 	if ((sc->intr_handle) && (sc->irq_res)) {
268 		error = bus_teardown_intr(sc->bus_dev,
269 						sc->irq_res, sc->intr_handle);
270 		sc->intr_handle = NULL;
271 	}
272 	return( error );
273 }
274 
275 
276 
277 /*
278  * Function name:	twa_attach
279  * Description:		Allocates pci resources; updates sc; adds a node to the
280  *			sysctl tree to expose the driver version; makes calls
281  *			(to the Common Layer) to initialize ctlr, and to
282  *			attach to CAM.
283  *
284  * Input:		dev	-- bus device corresponding to the ctlr
285  * Output:		None
286  * Return value:	0	-- success
287  *			non-zero-- failure
288  */
289 static TW_INT32
290 twa_attach(device_t dev)
291 {
292 	struct twa_softc	*sc = device_get_softc(dev);
293 	TW_INT32		bar_num;
294 	TW_INT32		bar0_offset;
295 	TW_INT32		bar_size;
296 	TW_INT32		irq_flags;
297 	TW_INT32		error;
298 
299 	sc->ctlr_handle.osl_ctlr_ctxt = sc;
300 
301 	/* Initialize the softc structure. */
302 	sc->bus_dev = dev;
303 	tw_osli_dbg_dprintf(3, sc, "entered");
304 	sc->device_id = pci_get_device(dev);
305 
306 	/* Initialize the locks right here. */
307 	sc->io_lock = &(sc->io_lock_handle);
308 	spin_init(sc->io_lock, "twa_iolock");
309 	sc->q_lock = &(sc->q_lock_handle);
310 	spin_init(sc->q_lock, "twa_qlock");
311 	sc->sim_lock = &(sc->sim_lock_handle);
312 	lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE);
313 
314 	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
315 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
316 		OID_AUTO, "driver_version", CTLFLAG_RD,
317 		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
318 
319 	/* Force the busmaster enable bit on, in case the BIOS forgot. */
320 	pci_enable_busmaster(dev);
321 
322 	/* Allocate the PCI register window. */
323 	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
324 		&bar_num, &bar0_offset, &bar_size))) {
325 		tw_osli_printf(sc, "error = %d",
326 			TW_CL_SEVERITY_ERROR_STRING,
327 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
328 			0x201F,
329 			"Can't get PCI BAR info",
330 			error);
331 		tw_osli_free_resources(sc);
332 		return(error);
333 	}
334 	sc->reg_res_id = PCIR_BARS + bar0_offset;
335 	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
336 				&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
337 				== NULL) {
338 		tw_osli_printf(sc, "error = %d",
339 			TW_CL_SEVERITY_ERROR_STRING,
340 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
341 			0x2002,
342 			"Can't allocate register window",
343 			ENXIO);
344 		tw_osli_free_resources(sc);
345 		return(ENXIO);
346 	}
347 	sc->bus_tag = rman_get_bustag(sc->reg_res);
348 	sc->bus_handle = rman_get_bushandle(sc->reg_res);
349 
350 	/* Allocate and register our interrupt. */
351 	sc->irq_res_id = 0;
352 	sc->irq_type = pci_alloc_1intr(sc->bus_dev, twa_msi_enable,
353 	    &sc->irq_res_id, &irq_flags);
354 	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
355 				&(sc->irq_res_id), 0, ~0, 1,
356 				irq_flags)) == NULL) {
357 		tw_osli_printf(sc, "error = %d",
358 			TW_CL_SEVERITY_ERROR_STRING,
359 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
360 			0x2003,
361 			"Can't allocate interrupt",
362 			ENXIO);
363 		tw_osli_free_resources(sc);
364 		return(ENXIO);
365 	}
366 	if ((error = twa_setup_intr(sc))) {
367 		tw_osli_printf(sc, "error = %d",
368 			TW_CL_SEVERITY_ERROR_STRING,
369 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
370 			0x2004,
371 			"Can't set up interrupt",
372 			error);
373 		tw_osli_free_resources(sc);
374 		return(error);
375 	}
376 
377 	if ((error = tw_osli_alloc_mem(sc))) {
378 		tw_osli_printf(sc, "error = %d",
379 			TW_CL_SEVERITY_ERROR_STRING,
380 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
381 			0x2005,
382 			"Memory allocation failure",
383 			error);
384 		tw_osli_free_resources(sc);
385 		return(error);
386 	}
387 
388 	/* Initialize the Common Layer for this controller. */
389 	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
390 			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
391 			sc->non_dma_mem, sc->dma_mem,
392 			sc->dma_mem_phys
393 			))) {
394 		tw_osli_printf(sc, "error = %d",
395 			TW_CL_SEVERITY_ERROR_STRING,
396 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
397 			0x2006,
398 			"Failed to initialize Common Layer/controller",
399 			error);
400 		tw_osli_free_resources(sc);
401 		return(error);
402 	}
403 
404 	/* Create the control device. */
405 	sc->ctrl_dev = make_dev(&twa_ops, device_get_unit(sc->bus_dev),
406 			UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
407 			"twa%d", device_get_unit(sc->bus_dev));
408 	sc->ctrl_dev->si_drv1 = sc;
409 
410 	if ((error = tw_osli_cam_attach(sc))) {
411 		tw_osli_free_resources(sc);
412 		tw_osli_printf(sc, "error = %d",
413 			TW_CL_SEVERITY_ERROR_STRING,
414 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
415 			0x2007,
416 			"Failed to initialize CAM",
417 			error);
418 		return(error);
419 	}
420 
421 	sc->watchdog_index = 0;
422 	callout_init_mp(&(sc->watchdog_callout[0]));
423 	callout_init_mp(&(sc->watchdog_callout[1]));
424 	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);
425 
426 	return(0);
427 }
428 
429 
430 static TW_VOID
431 twa_watchdog(TW_VOID *arg)
432 {
433 	struct tw_cl_ctlr_handle *ctlr_handle =
434 		(struct tw_cl_ctlr_handle *)arg;
435 	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
436 	int				i;
437 	int				i_need_a_reset = 0;
438 	int				driver_is_active = 0;
439 	TW_UINT64			current_time;
440 	struct tw_osli_req_context	*my_req;
441 
442 
443 /*============================================================================*/
444 	current_time = (TW_UINT64) (tw_osl_get_local_time());
445 
446 	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
447 		my_req = &(sc->req_ctx_buf[i]);
448 
449 		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
450 			(my_req->deadline) &&
451 			(my_req->deadline < current_time)) {
452 			tw_cl_set_reset_needed(ctlr_handle);
453 #ifdef    TW_OSL_DEBUG
454 			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, (unsigned long long)my_req->deadline, (unsigned long long)current_time);
455 #else  /* TW_OSL_DEBUG */
456 			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
457 #endif /* TW_OSL_DEBUG */
458 			break;
459 		}
460 	}
461 /*============================================================================*/
462 
463 	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);
464 
465 	i = (int) ((sc->watchdog_index++) & 1);
466 
467 	driver_is_active = tw_cl_is_active(ctlr_handle);
468 
469 	if (i_need_a_reset) {
470 #ifdef    TW_OSL_DEBUG
471 		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
472 #endif /* TW_OSL_DEBUG */
473 		callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
474 		tw_cl_reset_ctlr(ctlr_handle);
475 #ifdef    TW_OSL_DEBUG
476 		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
477 #endif /* TW_OSL_DEBUG */
478 	} else if (driver_is_active) {
479 		callout_reset(&(sc->watchdog_callout[i]),  5*hz, twa_watchdog, &sc->ctlr_handle);
480 	}
481 #ifdef    TW_OSL_DEBUG
482 	if (i_need_a_reset)
483 		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
484 		"driver_is_active = %d\n",
485 		i_need_a_reset, driver_is_active);
486 #endif /* TW_OSL_DEBUG */
487 }
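
/*
 * Note on the rearm logic above: the low bit of watchdog_index selects which
 * of the two callouts is rearmed, alternating between them on successive
 * passes:
 *
 *	i = (int)((sc->watchdog_index++) & 1);		yields 0, 1, 0, 1, ...
 *
 * A normal pass rearms in 5 seconds; when a reset is needed the rearm is
 * pushed out to 70 seconds so tw_cl_reset_ctlr() has time to complete before
 * the next deadline scan.
 */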
488 
489 
490 /*
491  * Function name:	tw_osli_alloc_mem
492  * Description:		Allocates memory needed both by CL and OSL.
493  *
494  * Input:		sc	-- OSL internal controller context
495  * Output:		None
496  * Return value:	0	-- success
497  *			non-zero-- failure
498  */
499 static TW_INT32
500 tw_osli_alloc_mem(struct twa_softc *sc)
501 {
502 	struct tw_osli_req_context	*req;
503 	TW_UINT32			max_sg_elements;
504 	TW_UINT32			non_dma_mem_size;
505 	TW_UINT32			dma_mem_size;
506 	TW_INT32			error;
507 	TW_INT32			i;
508 
509 	tw_osli_dbg_dprintf(3, sc, "entered");
510 
511 	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
512 	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
513 
514 	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
515 		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
516 
517 	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
518 			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS,  TW_OSLI_MAX_NUM_AENS,
519 			&(sc->alignment), &(sc->sg_size_factor),
520 			&non_dma_mem_size, &dma_mem_size
521 			))) {
522 		tw_osli_printf(sc, "error = %d",
523 			TW_CL_SEVERITY_ERROR_STRING,
524 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
525 			0x2008,
526 			"Can't get Common Layer's memory requirements",
527 			error);
528 		return(error);
529 	}
530 
531 	sc->non_dma_mem = kmalloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
532 	    M_WAITOK);
533 
534 	/* Create the parent dma tag. */
535 	if (bus_dma_tag_create(NULL,			/* parent */
536 				sc->alignment,		/* alignment */
537 				TW_OSLI_DMA_BOUNDARY,	/* boundary */
538 				BUS_SPACE_MAXADDR,	/* lowaddr */
539 				BUS_SPACE_MAXADDR, 	/* highaddr */
540 				TW_CL_MAX_IO_SIZE,	/* maxsize */
541 				max_sg_elements,	/* nsegments */
542 				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
543 				0,			/* flags */
544 				&sc->parent_tag		/* tag */)) {
545 		tw_osli_printf(sc, "error = %d",
546 			TW_CL_SEVERITY_ERROR_STRING,
547 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
548 			0x200A,
549 			"Can't allocate parent DMA tag",
550 			ENOMEM);
551 		return(ENOMEM);
552 	}
553 
554 	/* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
555 	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
556 				sc->alignment,		/* alignment */
557 				0,			/* boundary */
558 				BUS_SPACE_MAXADDR,	/* lowaddr */
559 				BUS_SPACE_MAXADDR, 	/* highaddr */
560 				dma_mem_size,		/* maxsize */
561 				1,			/* nsegments */
562 				BUS_SPACE_MAXSIZE,	/* maxsegsize */
563 				0,			/* flags */
564 				&sc->cmd_tag		/* tag */)) {
565 		tw_osli_printf(sc, "error = %d",
566 			TW_CL_SEVERITY_ERROR_STRING,
567 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
568 			0x200B,
569 			"Can't allocate DMA tag for Common Layer's "
570 			"DMA'able memory",
571 			ENOMEM);
572 		return(ENOMEM);
573 	}
574 
575 	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
576 		BUS_DMA_NOWAIT, &sc->cmd_map)) {
577 		/* Try a second time. */
578 		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
579 			BUS_DMA_NOWAIT, &sc->cmd_map)) {
580 			tw_osli_printf(sc, "error = %d",
581 				TW_CL_SEVERITY_ERROR_STRING,
582 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
583 				0x200C,
584 				"Can't allocate DMA'able memory for the "
585 				"Common Layer",
586 				ENOMEM);
587 			return(ENOMEM);
588 		}
589 	}
590 
591 	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
592 		dma_mem_size, twa_map_load_callback,
593 		&sc->dma_mem_phys, 0);
594 
595 	/*
596 	 * Create a dma tag for data buffers; size will be the maximum
597 	 * possible I/O size (128kB).
598 	 */
599 	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
600 				sc->alignment,		/* alignment */
601 				0,			/* boundary */
602 				BUS_SPACE_MAXADDR,	/* lowaddr */
603 				BUS_SPACE_MAXADDR, 	/* highaddr */
604 				TW_CL_MAX_IO_SIZE,	/* maxsize */
605 				max_sg_elements,	/* nsegments */
606 				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
607 				BUS_DMA_ALLOCNOW,	/* flags */
608 				&sc->dma_tag		/* tag */)) {
609 		tw_osli_printf(sc, "error = %d",
610 			TW_CL_SEVERITY_ERROR_STRING,
611 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
612 			0x200F,
613 			"Can't allocate DMA tag for data buffers",
614 			ENOMEM);
615 		return(ENOMEM);
616 	}
617 
618 	/*
619 	 * Create a dma tag for ioctl data buffers; size will be the maximum
620 	 * possible I/O size (128kB).
621 	 */
622 	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
623 				sc->alignment,		/* alignment */
624 				0,			/* boundary */
625 				BUS_SPACE_MAXADDR,	/* lowaddr */
626 				BUS_SPACE_MAXADDR, 	/* highaddr */
627 				TW_CL_MAX_IO_SIZE,	/* maxsize */
628 				max_sg_elements,	/* nsegments */
629 				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
630 				BUS_DMA_ALLOCNOW,	/* flags */
631 				&sc->ioctl_tag		/* tag */)) {
632 		tw_osli_printf(sc, "error = %d",
633 			TW_CL_SEVERITY_ERROR_STRING,
634 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
635 			0x2010,
636 			"Can't allocate DMA tag for ioctl data buffers",
637 			ENOMEM);
638 		return(ENOMEM);
639 	}
640 
641 	/* Create just one map for all ioctl request data buffers. */
642 	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
643 		tw_osli_printf(sc, "error = %d",
644 			TW_CL_SEVERITY_ERROR_STRING,
645 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
646 			0x2011,
647 			"Can't create ioctl map",
648 			ENOMEM);
649 		return(ENOMEM);
650 	}
651 
652 
653 	/* Initialize request queues. */
654 	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
655 	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
656 
657 	sc->req_ctx_buf = kmalloc((sizeof(struct tw_osli_req_context) *
658 	    TW_OSLI_MAX_NUM_REQUESTS), TW_OSLI_MALLOC_CLASS,
659 	    M_WAITOK | M_ZERO);
660 	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
661 		req = &(sc->req_ctx_buf[i]);
662 		req->ctlr = sc;
663 		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
664 			tw_osli_printf(sc, "request # = %d, error = %d",
665 				TW_CL_SEVERITY_ERROR_STRING,
666 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
667 				0x2013,
668 				"Can't create dma map",
669 				i, ENOMEM);
670 			return(ENOMEM);
671 		}
672 
673 		/* Initialize the ioctl wakeup/timeout lock. */
674 		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
675 		lockinit(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", 0, 0);
676 
677 		/* Insert request into the free queue. */
678 		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
679 	}
680 
681 	return(0);
682 }
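
/*
 * DMA tag hierarchy resulting from the allocations above:
 *
 *	parent_tag
 *	    cmd_tag	one contiguous segment for the Common Layer's
 *			command area (dma_mem / cmd_map / dma_mem_phys)
 *	    dma_tag	per-request maps (req->dma_map) for CAM I/O data
 *	    ioctl_tag	a single shared map (ioctl_map) for passthru ioctl
 *			data buffers
 *
 * The two data tags are limited to TW_CL_MAX_IO_SIZE and max_sg_elements
 * per transfer, and all of them inherit the controller's alignment
 * requirement.
 */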
683 
684 
685 
686 /*
687  * Function name:	tw_osli_free_resources
688  * Description:		Performs clean-up at the time of going down.
689  *
690  * Input:		sc	-- ptr to OSL internal ctlr context
691  * Output:		None
692  * Return value:	None
693  */
694 static TW_VOID
695 tw_osli_free_resources(struct twa_softc *sc)
696 {
697 	struct tw_osli_req_context	*req;
698 	TW_INT32			error = 0;
699 
700 	tw_osli_dbg_dprintf(3, sc, "entered");
701 
702 	/* Detach from CAM */
703 	tw_osli_cam_detach(sc);
704 
705 	if (sc->req_ctx_buf)
706 		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
707 			NULL) {
708 			lockuninit(req->ioctl_wake_timeout_lock);
709 
710 			if ((error = bus_dmamap_destroy(sc->dma_tag,
711 					req->dma_map)))
712 				tw_osli_dbg_dprintf(1, sc,
713 					"dmamap_destroy(dma) returned %d",
714 					error);
715 		}
716 
717 	if ((sc->ioctl_tag) && (sc->ioctl_map))
718 		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
719 			tw_osli_dbg_dprintf(1, sc,
720 				"dmamap_destroy(ioctl) returned %d", error);
721 
722 	/* Free all memory allocated so far. */
723 	if (sc->req_ctx_buf)
724 		kfree(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);
725 
726 	if (sc->non_dma_mem)
727 		kfree(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
728 
729 	if (sc->dma_mem) {
730 		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
731 		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
732 			sc->cmd_map);
733 	}
734 	if (sc->cmd_tag)
735 		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
736 			tw_osli_dbg_dprintf(1, sc,
737 				"dma_tag_destroy(cmd) returned %d", error);
738 
739 	if (sc->dma_tag)
740 		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
741 			tw_osli_dbg_dprintf(1, sc,
742 				"dma_tag_destroy(dma) returned %d", error);
743 
744 	if (sc->ioctl_tag)
745 		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
746 			tw_osli_dbg_dprintf(1, sc,
747 				"dma_tag_destroy(ioctl) returned %d", error);
748 
749 	if (sc->parent_tag)
750 		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
751 			tw_osli_dbg_dprintf(1, sc,
752 				"dma_tag_destroy(parent) returned %d", error);
753 
754 
755 	/* Disconnect the interrupt handler. */
756 	if ((error = twa_teardown_intr(sc)))
757 			tw_osli_dbg_dprintf(1, sc,
758 				"teardown_intr returned %d", error);
759 
760 	if (sc->irq_res != NULL)
761 		if ((error = bus_release_resource(sc->bus_dev,
762 				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
763 			tw_osli_dbg_dprintf(1, sc,
764 				"release_resource(irq) returned %d", error);
765 
766 	if (sc->irq_type == PCI_INTR_TYPE_MSI)
767 		pci_release_msi(sc->bus_dev);
768 
769 	/* Release the register window mapping. */
770 	if (sc->reg_res != NULL)
771 		if ((error = bus_release_resource(sc->bus_dev,
772 				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
773 			tw_osli_dbg_dprintf(1, sc,
774 				"release_resource(io) returned %d", error);
775 
776 	/* Destroy the control device. */
777 	if (sc->ctrl_dev != NULL)
778 		destroy_dev(sc->ctrl_dev);
779 	dev_ops_remove_minor(&twa_ops, device_get_unit(sc->bus_dev));
780 }
781 
782 
783 
784 /*
785  * Function name:	twa_detach
786  * Description:		Called when the controller is being detached from
787  *			the pci bus.
788  *
789  * Input:		dev	-- bus device corresponding to the ctlr
790  * Output:		None
791  * Return value:	0	-- success
792  *			non-zero-- failure
793  */
794 static TW_INT32
795 twa_detach(device_t dev)
796 {
797 	struct twa_softc	*sc = device_get_softc(dev);
798 	TW_INT32		error;
799 
800 	tw_osli_dbg_dprintf(3, sc, "entered");
801 
802 	error = EBUSY;
803 	if (sc->open) {
804 		tw_osli_printf(sc, "error = %d",
805 			TW_CL_SEVERITY_ERROR_STRING,
806 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
807 			0x2014,
808 			"Device open",
809 			error);
810 		goto out;
811 	}
812 
813 	/* Shut the controller down. */
814 	if ((error = twa_shutdown(dev)))
815 		goto out;
816 
817 	/* Free all resources associated with this controller. */
818 	tw_osli_free_resources(sc);
819 	error = 0;
820 
821 out:
822 	return(error);
823 }
824 
825 
826 
827 /*
828  * Function name:	twa_shutdown
829  * Description:		Called at unload/shutdown time.  Lets the controller
830  *			know that we are going down.
831  *
832  * Input:		dev	-- bus device corresponding to the ctlr
833  * Output:		None
834  * Return value:	0	-- success
835  *			non-zero-- failure
836  */
837 static TW_INT32
838 twa_shutdown(device_t dev)
839 {
840 	struct twa_softc	*sc = device_get_softc(dev);
841 	TW_INT32		error = 0;
842 
843 	tw_osli_dbg_dprintf(3, sc, "entered");
844 
845 	/* Disconnect interrupts. */
846 	error = twa_teardown_intr(sc);
847 
848 	/* Stop watchdog task. */
849 	callout_cancel(&(sc->watchdog_callout[0]));
850 	callout_cancel(&(sc->watchdog_callout[1]));
851 
852 	/* Disconnect from the controller. */
853 	if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
854 		tw_osli_printf(sc, "error = %d",
855 			TW_CL_SEVERITY_ERROR_STRING,
856 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
857 			0x2015,
858 			"Failed to shutdown Common Layer/controller",
859 			error);
860 	}
861 	return(error);
862 }
863 
864 
865 
866 /*
867  * Function name:	twa_pci_intr
868  * Description:		Interrupt handler.  Wrapper for tw_cl_interrupt().
869  *
870  * Input:		arg	-- ptr to OSL internal ctlr context
871  * Output:		None
872  * Return value:	None
873  */
874 static TW_VOID
875 twa_pci_intr(TW_VOID *arg)
876 {
877 	struct twa_softc	*sc = (struct twa_softc *)arg;
878 
879 	tw_osli_dbg_dprintf(10, sc, "entered");
880 	tw_cl_interrupt(&(sc->ctlr_handle));
881 }
882 
883 
884 /*
885  * Function name:	tw_osli_fw_passthru
886  * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
887  *
888  * Input:		sc	-- ptr to OSL internal ctlr context
889  *			buf	-- ptr to ioctl pkt understood by CL
890  * Output:		None
891  * Return value:	0	-- success
892  *			non-zero-- failure
893  */
894 TW_INT32
895 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
896 {
897 	struct tw_osli_req_context		*req;
898 	struct tw_osli_ioctl_no_data_buf	*user_buf =
899 		(struct tw_osli_ioctl_no_data_buf *)buf;
900 	TW_TIME					end_time;
901 	TW_UINT32				timeout = 60;
902 	TW_UINT32				data_buf_size_adjusted;
903 	struct tw_cl_req_packet			*req_pkt;
904 	struct tw_cl_passthru_req_packet	*pt_req;
905 	TW_INT32				error;
906 
907 	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
908 
909 	if ((req = tw_osli_get_request(sc)) == NULL)
910 		return(EBUSY);
911 
912 	req->req_handle.osl_req_ctxt = req;
913 	req->orig_req = buf;
914 	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
915 
916 	req_pkt = &(req->req_pkt);
917 	req_pkt->status = 0;
918 	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
919 	/* Let the Common Layer retry the request on cmd queue full. */
920 	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
921 
922 	pt_req = &(req_pkt->gen_req_pkt.pt_req);
923 	/*
924 	 * Make sure that the data buffer sent to firmware is a
925 	 * multiple of the controller's SG size factor (512 bytes) in size.
926 	 */
927 	data_buf_size_adjusted = roundup2(user_buf->driver_pkt.buffer_length,
928 	    sc->sg_size_factor);
929 	if ((req->length = data_buf_size_adjusted)) {
930 		req->data = kmalloc(data_buf_size_adjusted,
931 		    TW_OSLI_MALLOC_CLASS, M_WAITOK);
932 		/* Copy the payload. */
933 		if ((error = copyin((TW_VOID *)(user_buf->pdata),
934 			req->data,
935 			user_buf->driver_pkt.buffer_length)) != 0) {
936 			tw_osli_printf(sc, "error = %d",
937 				TW_CL_SEVERITY_ERROR_STRING,
938 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
939 				0x2017,
940 				"Could not copyin fw_passthru data_buf",
941 				error);
942 			goto fw_passthru_err;
943 		}
944 		pt_req->sgl_entries = 1; /* will be updated during mapping */
945 		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
946 			TW_OSLI_REQ_FLAGS_DATA_OUT);
947 	} else
948 		pt_req->sgl_entries = 0; /* no payload */
949 
950 	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
951 	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
952 
953 	if ((error = tw_osli_map_request(req)))
954 		goto fw_passthru_err;
955 
956 	end_time = tw_osl_get_local_time() + timeout;
957 	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
958 		lockmgr(req->ioctl_wake_timeout_lock, LK_EXCLUSIVE);
959 		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
960 
961 		error = lksleep(req, req->ioctl_wake_timeout_lock, 0,
962 			    "twa_passthru", timeout*hz);
963 		lockmgr(req->ioctl_wake_timeout_lock, LK_RELEASE);
964 
965 		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
966 			error = 0;
967 		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
968 
969 		if (! error) {
970 			if (((error = req->error_code)) ||
971 				((error = (req->state !=
972 				TW_OSLI_REQ_STATE_COMPLETE))) ||
973 				((error = req_pkt->status)))
974 				goto fw_passthru_err;
975 			break;
976 		}
977 
978 		if (req_pkt->status) {
979 			error = req_pkt->status;
980 			goto fw_passthru_err;
981 		}
982 
983 		if (error == EWOULDBLOCK) {
984 			/* Time out! */
985 			if ((!(req->error_code))                       &&
986 			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
987 			    (!(req_pkt->status))			  ) {
988 #ifdef    TW_OSL_DEBUG
989 				tw_osli_printf(sc, "request = %p",
990 					TW_CL_SEVERITY_ERROR_STRING,
991 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
992 					0x7777,
993 					"FALSE Passthru timeout!",
994 					req);
995 #endif /* TW_OSL_DEBUG */
996 				error = 0; /* False error */
997 				break;
998 			}
999 			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
1000 #ifdef    TW_OSL_DEBUG
1001 				tw_osli_printf(sc, "request = %p",
1002 					TW_CL_SEVERITY_ERROR_STRING,
1003 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1004 					0x2018,
1005 					"Passthru request timed out!",
1006 					req);
1007 #else  /* TW_OSL_DEBUG */
1008 				device_printf((sc)->bus_dev, "Passthru request timed out!\n");
1009 #endif /* TW_OSL_DEBUG */
1010 				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
1011 			}
1012 
1013 			error = 0;
1014 			end_time = tw_osl_get_local_time() + timeout;
1015 			continue;
1016 			/*
1017 			 * Don't touch req after a reset.  It (and any
1018 			 * associated data) will be
1019 			 * unmapped by the callback.
1020 			 */
1021 		}
1022 		/*
1023 		 * Either the request got completed, or we were woken up by a
1024 		 * signal.  Calculate the new timeout, in case it was the latter.
1025 		 */
1026 		timeout = (end_time - tw_osl_get_local_time());
1027 	} /* End of while loop */
1028 
1029 	/* If there was a payload, copy it back. */
1030 	if ((!error) && (req->length))
1031 		if ((error = copyout(req->data, user_buf->pdata,
1032 			user_buf->driver_pkt.buffer_length)))
1033 			tw_osli_printf(sc, "error = %d",
1034 				TW_CL_SEVERITY_ERROR_STRING,
1035 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1036 				0x2019,
1037 				"Could not copyout fw_passthru data_buf",
1038 				error);
1039 
1040 fw_passthru_err:
1041 
1042 	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1043 		error = EBUSY;
1044 
1045 	user_buf->driver_pkt.os_status = error;
1046 	/* Free resources. */
1047 	if (req->data)
1048 		kfree(req->data, TW_OSLI_MALLOC_CLASS);
1049 	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1050 	return(error);
1051 }
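
/*
 * Usage sketch (illustrative; only the fields this routine actually touches
 * are shown, and filling in the firmware command itself is elided):
 *
 *	struct tw_osli_ioctl_no_data_buf io;
 *
 *	bzero(&io, sizeof(io));
 *	io.driver_pkt.buffer_length = data_len;	   payload size, 0 if none
 *	io.pdata = data_buf;			   user buffer, copied in/out
 *	... fill in io.cmd_pkt with the firmware request ...
 *	ioctl(fd, TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH, &io);
 *	... io.driver_pkt.os_status then holds the errno-style result ...
 */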
1052 
1053 
1054 
1055 /*
1056  * Function name:	tw_osl_complete_passthru
1057  * Description:		Called to complete passthru requests.
1058  *
1059  * Input:		req_handle	-- ptr to request handle
1060  * Output:		None
1061  * Return value:	None
1062  */
1063 TW_VOID
1064 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1065 {
1066 	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
1067 	struct tw_cl_req_packet		*req_pkt =
1068 		(struct tw_cl_req_packet *)(&req->req_pkt);
1069 	struct twa_softc		*sc = req->ctlr;
1070 
1071 	tw_osli_dbg_dprintf(5, sc, "entered");
1072 
1073 	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1074 		tw_osli_printf(sc, "request = %p, state = %d",
1075 			TW_CL_SEVERITY_ERROR_STRING,
1076 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1077 			0x201B,
1078 			"Unposted command completed!!",
1079 			req, req->state);
1080 	}
1081 
1082 	/*
1083 	 * Remove request from the busy queue.  Just mark it complete.
1084 	 * There's no need to move it into the complete queue as we are
1085 	 * going to be done with it right now.
1086 	 */
1087 	req->state = TW_OSLI_REQ_STATE_COMPLETE;
1088 	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1089 
1090 	tw_osli_unmap_request(req);
1091 
1092 	/*
1093 	 * Don't do a wake up if there was an error even before the request
1094 	 * was sent down to the Common Layer, and we hadn't gotten an
1095 	 * EINPROGRESS.  The request originator will then be returned an
1096 	 * error, and he can do the clean-up.
1097 	 */
1098 	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1099 		return;
1100 
1101 	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1102 		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1103 			/* Wake up the sleeping command originator. */
1104 			tw_osli_dbg_dprintf(5, sc,
1105 				"Waking up originator of request %p", req);
1106 			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1107 			wakeup_one(req);
1108 		} else {
1109 			/*
1110 			 * If the request completed even before lksleep
1111 			 * was called, simply return.
1112 			 */
1113 			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1114 				return;
1115 
1116 			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1117 				return;
1118 
1119 			tw_osli_printf(sc, "request = %p",
1120 				TW_CL_SEVERITY_ERROR_STRING,
1121 				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1122 				0x201C,
1123 				"Passthru callback called, "
1124 				"and caller not sleeping",
1125 				req);
1126 		}
1127 	} else {
1128 		tw_osli_printf(sc, "request = %p",
1129 			TW_CL_SEVERITY_ERROR_STRING,
1130 			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1131 			0x201D,
1132 			"Passthru callback called for non-passthru request",
1133 			req);
1134 	}
1135 }
1136 
1137 
1138 
1139 /*
1140  * Function name:	tw_osli_get_request
1141  * Description:		Gets a request pkt from the free queue.
1142  *
1143  * Input:		sc	-- ptr to OSL internal ctlr context
1144  * Output:		None
1145  * Return value:	ptr to request pkt	-- success
1146  *			NULL			-- failure
1147  */
1148 struct tw_osli_req_context *
1149 tw_osli_get_request(struct twa_softc *sc)
1150 {
1151 	struct tw_osli_req_context	*req;
1152 
1153 	tw_osli_dbg_dprintf(4, sc, "entered");
1154 
1155 	/* Get a free request packet. */
1156 	req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1157 
1158 	/* Initialize some fields to their defaults. */
1159 	if (req) {
1160 		req->req_handle.osl_req_ctxt = NULL;
1161 		req->req_handle.cl_req_ctxt = NULL;
1162 		req->req_handle.is_io = 0;
1163 		req->data = NULL;
1164 		req->length = 0;
1165 		req->deadline = 0;
1166 		req->real_data = NULL;
1167 		req->real_length = 0;
1168 		req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1169 		req->flags = 0;
1170 		req->error_code = 0;
1171 		req->orig_req = NULL;
1172 
1173 		bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1174 
1175 	}
1176 	return(req);
1177 }
1178 
1179 
1180 
1181 /*
1182  * Function name:	twa_map_load_data_callback
1183  * Description:		Callback of bus_dmamap_load for the buffer associated
1184  *			with data.  Updates the cmd pkt (size/sgl_entries
1185  *			fields, as applicable) to reflect the number of sg
1186  *			elements.
1187  *
1188  * Input:		arg	-- ptr to OSL internal request context
1189  *			segs	-- ptr to a list of segment descriptors
1190  *			nsegments--# of segments
1191  *			error	-- 0 if no errors encountered before callback,
1192  *				   non-zero if errors were encountered
1193  * Output:		None
1194  * Return value:	None
1195  */
1196 static TW_VOID
1197 twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1198 	TW_INT32 nsegments, TW_INT32 error)
1199 {
1200 	struct tw_osli_req_context	*req =
1201 		(struct tw_osli_req_context *)arg;
1202 	struct twa_softc		*sc = req->ctlr;
1203 	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);
1204 
1205 	tw_osli_dbg_dprintf(10, sc, "entered");
1206 
1207 	if (error == EINVAL) {
1208 		req->error_code = error;
1209 		return;
1210 	}
1211 
1212 	/* Mark the request as currently being processed. */
1213 	req->state = TW_OSLI_REQ_STATE_BUSY;
1214 	/* Move the request into the busy queue. */
1215 	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1216 
1217 	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
1218 
1219 	if (error == EFBIG) {
1220 		req->error_code = error;
1221 		goto out;
1222 	}
1223 
1224 	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1225 		struct tw_cl_passthru_req_packet	*pt_req;
1226 
1227 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1228 			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1229 				BUS_DMASYNC_PREREAD);
1230 
1231 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1232 			/*
1233 			 * If we're using an alignment buffer, and we're
1234 			 * writing data, copy the real data out.
1235 			 */
1236 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1237 				bcopy(req->real_data, req->data, req->real_length);
1238 			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1239 				BUS_DMASYNC_PREWRITE);
1240 		}
1241 
1242 		pt_req = &(req_pkt->gen_req_pkt.pt_req);
1243 		pt_req->sg_list = (TW_UINT8 *)segs;
1244 		pt_req->sgl_entries += (nsegments - 1);
1245 		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
1246 			&(req->req_handle));
1247 	} else {
1248 		struct tw_cl_scsi_req_packet	*scsi_req;
1249 
1250 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1251 			bus_dmamap_sync(sc->dma_tag, req->dma_map,
1252 				BUS_DMASYNC_PREREAD);
1253 
1254 		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1255 			/*
1256 			 * If we're using an alignment buffer, and we're
1257 			 * writing data, copy the real data out.
1258 			 */
1259 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1260 				bcopy(req->real_data, req->data, req->real_length);
1261 			bus_dmamap_sync(sc->dma_tag, req->dma_map,
1262 				BUS_DMASYNC_PREWRITE);
1263 		}
1264 
1265 		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
1266 		scsi_req->sg_list = (TW_UINT8 *)segs;
1267 		scsi_req->sgl_entries += (nsegments - 1);
1268 		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
1269 			&(req->req_handle));
1270 	}
1271 
1272 out:
1273 	if (error) {
1274 		req->error_code = error;
1275 		req_pkt->tw_osl_callback(&(req->req_handle));
1276 		/*
1277 		 * If the caller had been returned EINPROGRESS, and he has
1278 		 * registered a callback for handling completion, the callback
1279 		 * will never get called because we were unable to submit the
1280 		 * request.  So, free up the request right here.
1281 		 */
1282 		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1283 			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1284 	}
1285 }
1286 
1287 
1288 
1289 /*
1290  * Function name:	twa_map_load_callback
1291  * Description:		Callback of bus_dmamap_load for the buffer associated
1292  *			with a cmd pkt.
1293  *
1294  * Input:		arg	-- ptr to variable to hold phys addr
1295  *			segs	-- ptr to a list of segment descriptors
1296  *			nsegments--# of segments
1297  *			error	-- 0 if no errors encountered before callback,
1298  *				   non-zero if errors were encountered
1299  * Output:		None
1300  * Return value:	None
1301  */
1302 static TW_VOID
1303 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1304 	TW_INT32 nsegments, TW_INT32 error)
1305 {
1306 	*((bus_addr_t *)arg) = segs[0].ds_addr;
1307 }
1308 
1309 
1310 
1311 /*
1312  * Function name:	tw_osli_map_request
1313  * Description:		Maps a cmd pkt and data associated with it, into
1314  *			DMA'able memory.
1315  *
1316  * Input:		req	-- ptr to request pkt
1317  * Output:		None
1318  * Return value:	0	-- success
1319  *			non-zero-- failure
1320  */
1321 TW_INT32
1322 tw_osli_map_request(struct tw_osli_req_context *req)
1323 {
1324 	struct twa_softc	*sc = req->ctlr;
1325 	TW_INT32		error = 0;
1326 
1327 	tw_osli_dbg_dprintf(10, sc, "entered");
1328 
1329 	/* If the command involves data, map that too. */
1330 	if (req->data != NULL) {
1331 		/*
1332 		 * It's sufficient for the data pointer to be 4-byte aligned
1333 		 * to work with 9000.  However, if 4-byte aligned addresses
1334 		 * are passed to bus_dmamap_load, we can get back sg elements
1335 		 * that are not 512-byte multiples in size.  So, we will let
1336 		 * only those buffers that are 512-byte aligned to pass
1337 		 * through, and bounce the rest, so as to make sure that we
1338 		 * always get back sg elements that are 512-byte multiples
1339 		 * in size.
1340 		 */
1341 		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
1342 			(req->length % sc->sg_size_factor)) {
1343 			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
1344 			/* Save original data pointer and length. */
1345 			req->real_data = req->data;
1346 			req->real_length = req->length;
1347 			req->length = roundup2(req->length, sc->sg_size_factor);
1348 			req->data = kmalloc(req->length, TW_OSLI_MALLOC_CLASS,
1349 					M_NOWAIT);
1350 			if (req->data == NULL) {
1351 				tw_osli_printf(sc, "error = %d",
1352 					TW_CL_SEVERITY_ERROR_STRING,
1353 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1354 					0x201E,
1355 					"Failed to allocate memory "
1356 					"for bounce buffer",
1357 					ENOMEM);
1358 				/* Restore original data pointer and length. */
1359 				req->data = req->real_data;
1360 				req->length = req->real_length;
1361 				return(ENOMEM);
1362 			}
1363 		}
1364 
1365 		/*
1366 		 * Map the data buffer into bus space and build the SG list.
1367 		 */
1368 		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1369 			/* Lock against multiple simultaneous ioctl calls. */
1370 			spin_lock(sc->io_lock);
1371 			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
1372 				req->data, req->length,
1373 				twa_map_load_data_callback, req,
1374 				BUS_DMA_WAITOK);
1375 			spin_unlock(sc->io_lock);
1376 		} else {
1377 			/*
1378 			 * There's only one CAM I/O thread running at a time.
1379 			 * So, there's no need to hold the io_lock.
1380 			 */
1381 			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
1382 				req->data, req->length,
1383 				twa_map_load_data_callback, req,
1384 				BUS_DMA_WAITOK);
1385 		}
1386 
1387 		if (!error)
1388 			error = req->error_code;
1389 		else {
1390 			if (error == EINPROGRESS) {
1391 				/*
1392 				 * Specifying sc->io_lock as the lockfuncarg
1393 				 * in ...tag_create should protect the access
1394 				 * of ...FLAGS_MAPPED from the callback.
1395 				 */
1396 				spin_lock(sc->io_lock);
1397 				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
1398 					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
1399 				tw_osli_disallow_new_requests(sc, &(req->req_handle));
1400 				spin_unlock(sc->io_lock);
1401 				error = 0;
1402 			} else {
1403 				tw_osli_printf(sc, "error = %d",
1404 					TW_CL_SEVERITY_ERROR_STRING,
1405 					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1406 					0x9999,
1407 					"Failed to map DMA memory "
1408 					"for I/O request",
1409 					error);
1410 				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
1411 				/* Free alignment buffer if it was used. */
1412 				if (req->flags &
1413 					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1414 					kfree(req->data, TW_OSLI_MALLOC_CLASS);
1415 					/*
1416 					 * Restore original data pointer
1417 					 * and length.
1418 					 */
1419 					req->data = req->real_data;
1420 					req->length = req->real_length;
1421 				}
1422 			}
1423 		}
1424 
1425 	} else {
1426 		/* Mark the request as currently being processed. */
1427 		req->state = TW_OSLI_REQ_STATE_BUSY;
1428 		/* Move the request into the busy queue. */
1429 		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1430 		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
1431 			error = tw_cl_fw_passthru(&sc->ctlr_handle,
1432 					&(req->req_pkt), &(req->req_handle));
1433 		else
1434 			error = tw_cl_start_io(&sc->ctlr_handle,
1435 					&(req->req_pkt), &(req->req_handle));
1436 		if (error) {
1437 			req->error_code = error;
1438 			req->req_pkt.tw_osl_callback(&(req->req_handle));
1439 		}
1440 	}
1441 	return(error);
1442 }
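
/*
 * Worked example of the bounce decision above, assuming the usual
 * sg_size_factor of 512: a 1000-byte buffer at a 4-byte aligned address
 * fails both checks and is bounced into a kmalloc'ed buffer of
 * roundup2(1000, 512) = 1024 bytes, while a 4096-byte buffer starting on a
 * 512-byte boundary is mapped in place.
 */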
1443 
1444 
1445 
1446 /*
1447  * Function name:	tw_osli_unmap_request
1448  * Description:		Undoes the mapping done by tw_osli_map_request.
1449  *
1450  * Input:		req	-- ptr to request pkt
1451  * Output:		None
1452  * Return value:	None
1453  */
1454 TW_VOID
1455 tw_osli_unmap_request(struct tw_osli_req_context *req)
1456 {
1457 	struct twa_softc	*sc = req->ctlr;
1458 
1459 	tw_osli_dbg_dprintf(10, sc, "entered");
1460 
1461 	/* If the command involved data, unmap that too. */
1462 	if (req->data != NULL) {
1463 		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1464 			/* Lock against multiple simultaneous ioctl calls. */
1465 			spin_lock(sc->io_lock);
1466 
1467 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1468 				bus_dmamap_sync(sc->ioctl_tag,
1469 					sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1470 
1471 				/*
1472 				 * If we are using a bounce buffer, and we are
1473 				 * reading data, copy the real data in.
1474 				 */
1475 				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1476 					bcopy(req->data, req->real_data,
1477 						req->real_length);
1478 			}
1479 
1480 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1481 				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1482 					BUS_DMASYNC_POSTWRITE);
1483 
1484 			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1485 
1486 			spin_unlock(sc->io_lock);
1487 		} else {
1488 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1489 				bus_dmamap_sync(sc->dma_tag,
1490 					req->dma_map, BUS_DMASYNC_POSTREAD);
1491 
1492 				/*
1493 				 * If we are using a bounce buffer, and we are
1494 				 * reading data, copy the real data in.
1495 				 */
1496 				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1497 					bcopy(req->data, req->real_data,
1498 						req->real_length);
1499 			}
1500 			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1501 				bus_dmamap_sync(sc->dma_tag, req->dma_map,
1502 					BUS_DMASYNC_POSTWRITE);
1503 
1504 			bus_dmamap_unload(sc->dma_tag, req->dma_map);
1505 		}
1506 	}
1507 
1508 	/* Free alignment buffer if it was used. */
1509 	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1510 		kfree(req->data, TW_OSLI_MALLOC_CLASS);
1511 		/* Restore original data pointer and length. */
1512 		req->data = req->real_data;
1513 		req->length = req->real_length;
1514 	}
1515 }
1516 
1517 
1518 
1519 #ifdef TW_OSL_DEBUG
1520 
1521 TW_VOID	twa_report_stats(TW_VOID);
1522 TW_VOID	twa_reset_stats(TW_VOID);
1523 TW_VOID	tw_osli_print_ctlr_stats(struct twa_softc *sc);
1524 TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1525 
1526 
1527 /*
1528  * Function name:	twa_report_stats
1529  * Description:		For being called from ddb.  Calls functions that print
1530  *			OSL and CL internal stats for the controller.
1531  *
1532  * Input:		None
1533  * Output:		None
1534  * Return value:	None
1535  */
1536 TW_VOID
1537 twa_report_stats(TW_VOID)
1538 {
1539 	struct twa_softc	*sc;
1540 	TW_INT32		i;
1541 
1542 	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1543 		tw_osli_print_ctlr_stats(sc);
1544 		tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1545 	}
1546 }
1547 
1548 
1549 
1550 /*
1551  * Function name:	tw_osli_print_ctlr_stats
1552  * Description:		For being called from ddb.  Prints OSL controller stats
1553  *
1554  * Input:		sc	-- ptr to OSL internal controller context
1555  * Output:		None
1556  * Return value:	None
1557  */
1558 TW_VOID
1559 tw_osli_print_ctlr_stats(struct twa_softc *sc)
1560 {
1561 	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
1562 	twa_printf(sc, "OSLq type  current  max\n");
1563 	twa_printf(sc, "free      %04d     %04d\n",
1564 		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
1565 		sc->q_stats[TW_OSLI_FREE_Q].max_len);
1566 	twa_printf(sc, "busy      %04d     %04d\n",
1567 		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
1568 		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
1569 }
1570 
1571 
1572 
1573 /*
1574  * Function name:	twa_print_req_info
1575  * Description:		For being called from ddb.  Calls functions that print
1576  *			OSL and CL internal details for the request.
1577  *
1578  * Input:		req	-- ptr to OSL internal request context
1579  * Output:		None
1580  * Return value:	None
1581  */
1582 TW_VOID
1583 twa_print_req_info(struct tw_osli_req_context *req)
1584 {
1585 	struct twa_softc	*sc = req->ctlr;
1586 
1587 	twa_printf(sc, "OSL details for request:\n");
1588 	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
1589 		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
1590 		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
1591 		"next_req = %p, prev_req = %p, dma_map = %p\n",
1592 		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
1593 		req->data, req->length, req->real_data, req->real_length,
1594 		req->state, req->flags, req->error_code, req->orig_req,
1595 		req->link.next, req->link.prev, req->dma_map);
1596 	tw_cl_print_req_info(&(req->req_handle));
1597 }
1598 
1599 
1600 
1601 /*
1602  * Function name:	twa_reset_stats
1603  * Description:		For being called from ddb.
1604  *			Resets some OSL controller stats.
1605  *
1606  * Input:		None
1607  * Output:		None
1608  * Return value:	None
1609  */
1610 TW_VOID
1611 twa_reset_stats(TW_VOID)
1612 {
1613 	struct twa_softc	*sc;
1614 	TW_INT32		i;
1615 
1616 	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1617 		sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1618 		sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1619 		tw_cl_reset_stats(&sc->ctlr_handle);
1620 	}
1621 }
1622 
1623 #endif /* TW_OSL_DEBUG */
1624