xref: /dragonfly/sys/dev/raid/twa/tw_cl_init.c (revision 1e0dd9dd)
1 /*
2  * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3  * Copyright (c) 2004-05 Vinod Kashyap
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  *	$FreeBSD: head/sys/dev/twa/tw_cl_init.c 212008 2010-08-30 19:15:04Z delphij $
28  */
29 
30 /*
31  * AMCC's 3ware driver for 9000 series storage controllers.
32  *
33  * Author: Vinod Kashyap
34  * Modifications by: Adam Radford
35  * Modifications by: Manjunath Ranganathaiah
36  */
37 
38 
39 /*
40  * Common Layer initialization functions.
41  */
42 
43 
44 #include "tw_osl_share.h"
45 #include "tw_cl_share.h"
46 #include "tw_cl_fwif.h"
47 #include "tw_cl_ioctl.h"
48 #include "tw_cl.h"
49 #include "tw_cl_externs.h"
50 #include "tw_osl_ioctl.h"
51 
52 
53 /*
54  * Function name:	tw_cl_ctlr_supported
55  * Description:		Determines if a controller is supported.
56  *
57  * Input:		vendor_id -- vendor id of the controller
58  *			device_id -- device id of the controller
59  * Output:		None
60  * Return value:	TW_CL_TRUE-- controller supported
61  *			TW_CL_FALSE-- controller not supported
62  */
63 TW_INT32
64 tw_cl_ctlr_supported(TW_INT32 vendor_id, TW_INT32 device_id)
65 {
66 	if ((vendor_id == TW_CL_VENDOR_ID) &&
67 		((device_id == TW_CL_DEVICE_ID_9K) ||
68 		 (device_id == TW_CL_DEVICE_ID_9K_X) ||
69 		 (device_id == TW_CL_DEVICE_ID_9K_E) ||
70 		 (device_id == TW_CL_DEVICE_ID_9K_SA)))
71 		return(TW_CL_TRUE);
72 	return(TW_CL_FALSE);
73 }
74 
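/*
 * Illustrative usage sketch (not part of the original file): how an OS Layer
 * PCI probe routine might call tw_cl_ctlr_supported().  The osl_twa_probe()
 * name is hypothetical; device_t, pci_get_vendor()/pci_get_device(),
 * device_set_desc() and BUS_PROBE_DEFAULT are the usual FreeBSD/DragonFly
 * newbus/PCI interfaces.
 *
 *	static int
 *	osl_twa_probe(device_t dev)
 *	{
 *		if (tw_cl_ctlr_supported(pci_get_vendor(dev),
 *		    pci_get_device(dev)) == TW_CL_TRUE) {
 *			device_set_desc(dev, "3ware 9000 series controller");
 *			return(BUS_PROBE_DEFAULT);
 *		}
 *		return(ENXIO);
 *	}
 */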
75 
76 
77 /*
78  * Function name:	tw_cl_get_pci_bar_info
79  * Description:		Returns PCI BAR info.
80  *
81  * Input:		device_id -- device id of the controller
82  *			bar_type -- type of PCI BAR in question
83  * Output:		bar_num -- PCI BAR number corresponding to bar_type
84  *			bar0_offset -- byte offset from BAR 0 (0x10 in
85  *					PCI config space)
86  *			bar_size -- size, in bytes, of the BAR in question
87  * Return value:	0 -- success
88  *			non-zero -- failure
89  */
90 TW_INT32
91 tw_cl_get_pci_bar_info(TW_INT32 device_id, TW_INT32 bar_type,
92 	TW_INT32 *bar_num, TW_INT32 *bar0_offset, TW_INT32 *bar_size)
93 {
94 	TW_INT32	error = TW_OSL_ESUCCESS;
95 
96 	switch(device_id) {
97 	case TW_CL_DEVICE_ID_9K:
98 		switch(bar_type) {
99 		case TW_CL_BAR_TYPE_IO:
100 			*bar_num = 0;
101 			*bar0_offset = 0;
102 			*bar_size = 4;
103 			break;
104 
105 		case TW_CL_BAR_TYPE_MEM:
106 			*bar_num = 1;
107 			*bar0_offset = 0x4;
108 			*bar_size = 8;
109 			break;
110 
111 		case TW_CL_BAR_TYPE_SBUF:
112 			*bar_num = 2;
113 			*bar0_offset = 0xC;
114 			*bar_size = 8;
115 			break;
116 		}
117 		break;
118 
119 	case TW_CL_DEVICE_ID_9K_X:
120 	case TW_CL_DEVICE_ID_9K_E:
121 	case TW_CL_DEVICE_ID_9K_SA:
122 		switch(bar_type) {
123 		case TW_CL_BAR_TYPE_IO:
124 			*bar_num = 2;
125 			*bar0_offset = 0x10;
126 			*bar_size = 4;
127 			break;
128 
129 		case TW_CL_BAR_TYPE_MEM:
130 			*bar_num = 1;
131 			*bar0_offset = 0x8;
132 			*bar_size = 8;
133 			break;
134 
135 		case TW_CL_BAR_TYPE_SBUF:
136 			*bar_num = 0;
137 			*bar0_offset = 0;
138 			*bar_size = 8;
139 			break;
140 		}
141 		break;
142 
143 	default:
144 		error = TW_OSL_ENOTTY;
145 		break;
146 	}
147 
148 	return(error);
149 }
150 
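/*
 * Illustrative sketch (not part of the original file): the OS Layer can turn
 * the bar0_offset returned above into a PCI config-space register id and map
 * that BAR.  PCIR_BARS (0x10) and bus_alloc_resource_any() are the standard
 * FreeBSD/DragonFly interfaces; the reg_res_id/reg_res softc fields are
 * hypothetical names used only for this example.
 *
 *	tw_cl_get_pci_bar_info(device_id, TW_CL_BAR_TYPE_MEM,
 *		&bar_num, &bar0_offset, &bar_size);
 *	sc->reg_res_id = PCIR_BARS + bar0_offset;
 *	sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 *		&(sc->reg_res_id), RF_ACTIVE);
 */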
151 
152 
153 /*
154  * Function name:	tw_cl_get_mem_requirements
155  * Description:		Provides info about Common Layer requirements for a
156  *			controller, given the controller type (in 'flags').
157  * Input:		ctlr_handle -- controller handle
158  *			flags -- more info passed by the OS Layer
159  *			device_id -- device id of the controller
160  *			max_simult_reqs -- maximum # of simultaneous
161  *					requests that the OS Layer expects
162  *					the Common Layer to support
163  *			max_aens -- maximum # of AENs that need to be supported
164  * Output:		alignment -- alignment needed for all DMA'able
165  *					buffers
166  *			sg_size_factor -- every SG element should have a size
167  *					that's a multiple of this number
168  *			non_dma_mem_size -- # of bytes of memory needed for
169  *					non-DMA purposes
170  *			dma_mem_size -- # of bytes of DMA'able memory needed
171  *			per_req_dma_mem_size -- # of bytes of DMA'able memory
172  *					needed per request, if applicable
173  *			per_req_non_dma_mem_size -- # of bytes of memory needed
174  *					per request for non-DMA purposes,
175  *					if applicable
176  *
177  * Return value:	0	-- success
178  *			non-zero-- failure
179  */
180 TW_INT32
181 tw_cl_get_mem_requirements(struct tw_cl_ctlr_handle *ctlr_handle,
182 	TW_UINT32 flags, TW_INT32 device_id, TW_INT32 max_simult_reqs,
183 	TW_INT32 max_aens, TW_UINT32 *alignment, TW_UINT32 *sg_size_factor,
184 	TW_UINT32 *non_dma_mem_size, TW_UINT32 *dma_mem_size
185 	)
186 {
187 	if (device_id == 0)
188 		device_id = TW_CL_DEVICE_ID_9K;
189 
190 	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
191 		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
192 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
193 			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
194 			"Too many simultaneous requests to support!",
195 			"requested = %d, supported = %d, error = %d\n",
196 			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
197 			TW_OSL_EBIG);
198 		return(TW_OSL_EBIG);
199 	}
200 
201 	*alignment = TWA_ALIGNMENT(device_id);
202 	*sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
203 
204 	/*
205 	 * Total non-DMA memory needed is the sum total of memory needed for
206 	 * the controller context, request packets (including the 1 needed for
207 	 * CL internal requests), and event packets.
208 	 */
209 
210 	*non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) +
211 		(sizeof(struct tw_cli_req_context) * max_simult_reqs) +
212 		(sizeof(struct tw_cl_event_packet) * max_aens);
213 
214 
215 	/*
216 	 * Total DMA'able memory needed is the sum total of memory needed for
217 	 * all command packets (including the 1 needed for CL internal
218 	 * requests), and memory needed to hold the payload for internal
219 	 * requests.
220 	 */
221 
222 	*dma_mem_size = (sizeof(struct tw_cl_command_packet) *
223 		(max_simult_reqs)) + (TW_CLI_SECTOR_SIZE);
224 
225 	return(0);
226 }
227 
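/*
 * Illustrative sketch (not part of the original file): the expected call
 * order from the OS Layer.  The sizing call above tells the OSL how much
 * memory to allocate; that memory is then handed to tw_cl_init_ctlr() below.
 * The allocators and the dma_mem_phys address are the OSL's responsibility
 * and are only sketched here.
 *
 *	tw_cl_get_mem_requirements(ctlr_handle, flags, device_id,
 *		max_simult_reqs, max_aens, &alignment, &sg_size_factor,
 *		&non_dma_mem_size, &dma_mem_size);
 *	non_dma_mem = ...allocate non_dma_mem_size bytes...;
 *	dma_mem = ...allocate dma_mem_size bytes, 'alignment'-aligned,
 *		DMA'able, with physical address dma_mem_phys...;
 *	error = tw_cl_init_ctlr(ctlr_handle, flags, device_id,
 *		max_simult_reqs, max_aens, non_dma_mem, dma_mem, dma_mem_phys);
 */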
228 
229 
230 /*
231  * Function name:	tw_cl_init_ctlr
232  * Description:		Initializes driver data structures for the controller.
233  *
234  * Input:		ctlr_handle -- controller handle
235  *			flags -- more info passed by the OS Layer
236  *			device_id -- device id of the controller
237  *			max_simult_reqs -- maximum # of simultaneous requests
238  *					that the OS Layer expects the Common
239  *					Layer to support
240  *			max_aens -- maximum # of AENs that need to be supported
241  *			non_dma_mem -- ptr to allocated non-DMA memory
242  *			dma_mem -- ptr to allocated DMA'able memory
243  *			dma_mem_phys -- physical address of dma_mem
244  * Output:		None
245  * Return value:	0	-- success
246  *			non-zero-- failure
247  */
248 TW_INT32
249 tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
250 	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
251 	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys
252 	)
253 {
254 	struct tw_cli_ctlr_context	*ctlr;
255 	struct tw_cli_req_context	*req;
256 	TW_UINT8			*free_non_dma_mem;
257 	TW_INT32			error = TW_OSL_ESUCCESS;
258 	TW_INT32			i;
259 
260 	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");
261 
262 	if (flags & TW_CL_START_CTLR_ONLY) {
263 		ctlr = (struct tw_cli_ctlr_context *)
264 			(ctlr_handle->cl_ctlr_ctxt);
265 		goto start_ctlr;
266 	}
267 
268 	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
269 		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
270 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
271 			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
272 			"Too many simultaneous requests to support!",
273 			"requested = %d, supported = %d, error = %d\n",
274 			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
275 			TW_OSL_EBIG);
276 		return(TW_OSL_EBIG);
277 	}
278 
279 	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)
280 		) {
281 		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
282 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
283 			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
284 			"Insufficient memory for Common Layer's internal usage",
285 			"error = %d\n", TW_OSL_ENOMEM);
286 		return(TW_OSL_ENOMEM);
287 	}
288 
289 	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
290 		(sizeof(struct tw_cli_req_context) * max_simult_reqs) +
291 		(sizeof(struct tw_cl_event_packet) * max_aens));
292 
293 	tw_osl_memzero(dma_mem,
294 		(sizeof(struct tw_cl_command_packet) *
295 		max_simult_reqs) +
296 		TW_CLI_SECTOR_SIZE);
297 
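	/*
	 * The two blocks zeroed above are carved up below as follows:
	 *
	 *   non_dma_mem: [ tw_cli_ctlr_context                      ]
	 *                [ tw_cli_req_context   x max_simult_reqs   ]
	 *                [ tw_cl_event_packet   x max_aens          ]  (aen_queue)
	 *
	 *   dma_mem:     [ tw_cl_command_packet x max_simult_reqs   ]  (cmd_pkt_buf)
	 *                [ TW_CLI_SECTOR_SIZE bytes of payload for internal requests ]
	 */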
298 	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;
299 
300 	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
301 	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);
302 
303 	ctlr_handle->cl_ctlr_ctxt = ctlr;
304 	ctlr->ctlr_handle = ctlr_handle;
305 
306 	ctlr->device_id = (TW_UINT32)device_id;
307 	ctlr->arch_id = TWA_ARCH_ID(device_id);
308 	ctlr->flags = flags;
309 	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
310 	ctlr->max_simult_reqs = max_simult_reqs;
311 	ctlr->max_aens_supported = max_aens;
312 
313 	/* Initialize queues of CL internal request context packets. */
314 	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
315 	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
316 	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
317 	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);
318 	tw_cli_req_q_init(ctlr, TW_CLI_RESET_Q);
319 
320 	/* Initialize all locks used by CL. */
321 	ctlr->gen_lock = &(ctlr->gen_lock_handle);
322 	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
323 	ctlr->io_lock = &(ctlr->io_lock_handle);
324 	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);
325 
326 	/* Initialize CL internal request context packets. */
327 	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
328 	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
329 		max_simult_reqs);
330 
331 	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
332 	ctlr->cmd_pkt_phys = dma_mem_phys;
333 
334 	ctlr->internal_req_data = (TW_UINT8 *)
335 		(ctlr->cmd_pkt_buf +
336 		max_simult_reqs);
337 	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
338 		(sizeof(struct tw_cl_command_packet) *
339 		max_simult_reqs);
340 
341 	for (i = 0; i < max_simult_reqs; i++) {
342 		req = &(ctlr->req_ctxt_buf[i]);
343 
344 		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
345 		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
346 			(i * sizeof(struct tw_cl_command_packet));
347 
348 		req->request_id = i;
349 		req->ctlr = ctlr;
350 
351 		/* Insert request into the free queue. */
352 		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
353 	}
354 
355 	/* Initialize the AEN queue. */
356 	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;
357 
358 
359 start_ctlr:
360 	/*
361 	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
362 	 * (only) if initialization succeeded.
363 	 */
364 	tw_cli_disable_interrupts(ctlr);
365 
366 	/* Initialize the controller. */
367 	if ((error = tw_cli_start_ctlr(ctlr))) {
368 		/* Soft reset the controller, and try one more time. */
369 		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
370 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
371 			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
372 			"Controller initialization failed. Retrying...",
373 			"error = %d\n", error);
374 		if ((error = tw_cli_soft_reset(ctlr))) {
375 			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
376 				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
377 				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
378 				"Controller soft reset failed",
379 				"error = %d\n", error);
380 			return(error);
381 		} else if ((error = tw_cli_start_ctlr(ctlr))) {
382 			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
383 				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
384 				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
385 				"Controller initialization retry failed",
386 				"error = %d\n", error);
387 			return(error);
388 		}
389 	}
390 	/* Notify some info about the controller to the OSL. */
391 	tw_cli_notify_ctlr_info(ctlr);
392 
393 	/* Mark the controller active. */
394 	ctlr->active = TW_CL_TRUE;
395 	return(error);
396 }
397 
398 /*
399  * Function name:	tw_cli_start_ctlr
400  * Description:		Establishes a logical connection with the controller.
401  *			Determines whether or not the driver is compatible
402  *			Determines whether the driver is compatible with the
403  *			firmware on the controller before proceeding to work
404  *			with it.
405  * Input:		ctlr	-- ptr to per ctlr structure
406  * Output:		None
407  * Return value:	0	-- success
408  *			non-zero-- failure
409  */
410 TW_INT32
411 tw_cli_start_ctlr(struct tw_cli_ctlr_context *ctlr)
412 {
413 	TW_UINT16	fw_on_ctlr_srl = 0;
414 	TW_UINT16	fw_on_ctlr_arch_id = 0;
415 	TW_UINT16	fw_on_ctlr_branch = 0;
416 	TW_UINT16	fw_on_ctlr_build = 0;
417 	TW_UINT32	init_connect_result = 0;
418 	TW_INT32	error = TW_OSL_ESUCCESS;
419 
420 	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
421 
422 	/* Wait for the controller to become ready. */
423 	if ((error = tw_cli_poll_status(ctlr,
424 			TWA_STATUS_MICROCONTROLLER_READY,
425 			TW_CLI_REQUEST_TIMEOUT_PERIOD))) {
426 		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
427 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
428 			0x1009, 0x1, TW_CL_SEVERITY_ERROR_STRING,
429 			"Microcontroller not ready",
430 			"error = %d", error);
431 		return(error);
432 	}
433 	/* Drain the response queue. */
434 	if ((error = tw_cli_drain_response_queue(ctlr))) {
435 		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
436 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
437 			0x100A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
438 			"Can't drain response queue",
439 			"error = %d", error);
440 		return(error);
441 	}
442 	/* Establish a logical connection with the controller. */
443 	if ((error = tw_cli_init_connection(ctlr,
444 			(TW_UINT16)(ctlr->max_simult_reqs),
445 			TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
446 			(TW_UINT16)(ctlr->arch_id),
447 			TWA_CURRENT_FW_BRANCH(ctlr->arch_id),
448 			TWA_CURRENT_FW_BUILD(ctlr->arch_id),
449 			&fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
450 			&fw_on_ctlr_branch, &fw_on_ctlr_build,
451 			&init_connect_result))) {
452 		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
453 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
454 			0x100B, 0x2, TW_CL_SEVERITY_WARNING_STRING,
455 			"Can't initialize connection in current mode",
456 			"error = %d", error);
457 		return(error);
458 	}
459 	{
460 		/* See if we can at least work with the firmware on the
461 		 * controller in the current mode.
462 		 */
463 		if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
464 			/* Yes, we can.  Make note of the operating mode. */
465 			if (init_connect_result & TWA_CTLR_FW_SAME_OR_NEWER) {
466 				ctlr->working_srl = TWA_CURRENT_FW_SRL;
467 				ctlr->working_branch =
468 					TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
469 				ctlr->working_build =
470 					TWA_CURRENT_FW_BUILD(ctlr->arch_id);
471 			} else {
472 				ctlr->working_srl = fw_on_ctlr_srl;
473 				ctlr->working_branch = fw_on_ctlr_branch;
474 				ctlr->working_build = fw_on_ctlr_build;
475 			}
476 		} else {
477 			/*
478 			 * No, we can't.  See if we can at least work with
479 			 * it in the base mode.
480 			 */
481 			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
482 				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
483 				0x1010, 0x2, TW_CL_SEVERITY_WARNING_STRING,
484 				"Driver/Firmware mismatch. "
485 				"Negotiating for base level...",
486 				" ");
487 			if ((error = tw_cli_init_connection(ctlr,
488 					(TW_UINT16)(ctlr->max_simult_reqs),
489 					TWA_EXTENDED_INIT_CONNECT,
490 					TWA_BASE_FW_SRL,
491 					(TW_UINT16)(ctlr->arch_id),
492 					TWA_BASE_FW_BRANCH, TWA_BASE_FW_BUILD,
493 					&fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
494 					&fw_on_ctlr_branch, &fw_on_ctlr_build,
495 					&init_connect_result))) {
496 				tw_cl_create_event(ctlr->ctlr_handle,
497 					TW_CL_FALSE,
498 					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
499 					0x1011, 0x1,
500 					TW_CL_SEVERITY_ERROR_STRING,
501 					"Can't initialize connection in "
502 					"base mode",
503 					" ");
504 				return(error);
505 			}
506 			if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
507 				/*
508 				 * The firmware on the controller is not even
509 				 * compatible with our base mode.  We cannot
510 				 * work with it.  Bail...
511 				 */
512 				return(1);
513 			}
514 			/*
515 			 * We can work with this firmware, but only in
516 			 * base mode.
517 			 */
518 			ctlr->working_srl = TWA_BASE_FW_SRL;
519 			ctlr->working_branch = TWA_BASE_FW_BRANCH;
520 			ctlr->working_build = TWA_BASE_FW_BUILD;
521 			ctlr->operating_mode = TWA_BASE_MODE;
522 		}
523 		ctlr->fw_on_ctlr_srl = fw_on_ctlr_srl;
524 		ctlr->fw_on_ctlr_branch = fw_on_ctlr_branch;
525 		ctlr->fw_on_ctlr_build = fw_on_ctlr_build;
526 	}
527 
528 	/* Drain the AEN queue */
529 	if ((error = tw_cli_drain_aen_queue(ctlr)))
530 		/*
531 		 * We will just print that we couldn't drain the AEN queue.
532 		 * There's no need to bail out.
533 		 */
534 		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
535 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
536 			0x1014, 0x2, TW_CL_SEVERITY_WARNING_STRING,
537 			"Can't drain AEN queue",
538 			"error = %d", error);
539 
540 	/* Enable interrupts. */
541 	tw_cli_enable_interrupts(ctlr);
542 
543 	return(TW_OSL_ESUCCESS);
544 }
545 
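/*
 * Quick reference (derived from the code above): tw_cli_start_ctlr()
 * negotiates firmware compatibility in up to two steps --
 *   1. init_connection at TWA_CURRENT_FW_SRL (the SRL the driver is
 *      bundled with).  If the firmware reports TWA_CTLR_FW_COMPATIBLE,
 *      record the working SRL/branch/build (ours if the firmware is
 *      same-or-newer, the controller's otherwise).
 *   2. Otherwise, retry init_connection at TWA_BASE_FW_SRL and run in
 *      base mode; if the firmware is not even base-compatible, fail
 *      controller initialization.
 */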
546 
547 /*
548  * Function name:	tw_cl_shutdown_ctlr
549  * Description:		Closes logical connection with the controller.
550  *
551  * Input:		ctlr	-- ptr to per ctlr structure
552  *			flags	-- more info passed by the OS Layer
553  * Output:		None
554  * Return value:	0	-- success
555  *			non-zero-- failure
556  */
557 TW_INT32
558 tw_cl_shutdown_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags)
559 {
560 	struct tw_cli_ctlr_context	*ctlr =
561 		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
562 	TW_INT32			error;
563 
564 	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");
565 	/*
566 	 * Mark the controller as inactive, disable any further interrupts,
567 	 * and notify the controller that we are going down.
568 	 */
569 	ctlr->active = TW_CL_FALSE;
570 
571 	tw_cli_disable_interrupts(ctlr);
572 
573 	/* Let the controller know that we are going down. */
574 	if ((error = tw_cli_init_connection(ctlr, TWA_SHUTDOWN_MESSAGE_CREDITS,
575 			0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
576 			TW_CL_NULL, TW_CL_NULL)))
577 		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
578 			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
579 			0x1015, 0x1, TW_CL_SEVERITY_ERROR_STRING,
580 			"Can't close connection with controller",
581 			"error = %d", error);
582 
583 	if (flags & TW_CL_STOP_CTLR_ONLY)
584 		goto ret;
585 
586 	/* Destroy all locks used by CL. */
587 	tw_osl_destroy_lock(ctlr_handle, ctlr->gen_lock);
588 	tw_osl_destroy_lock(ctlr_handle, ctlr->io_lock);
589 
590 ret:
591 	return(error);
592 }
593 
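/*
 * Illustrative sketch (not part of the original file): a typical OS Layer
 * detach path.  Passing 0 for flags closes the connection and destroys the
 * CL locks; TW_CL_STOP_CTLR_ONLY keeps the locks so the controller can later
 * be restarted via tw_cl_init_ctlr() with TW_CL_START_CTLR_ONLY.  The
 * osl_twa_detach() name and the softc layout are hypothetical.
 *
 *	static int
 *	osl_twa_detach(device_t dev)
 *	{
 *		struct twa_softc	*sc = device_get_softc(dev);
 *
 *		tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0);
 *		...free dma_mem, non_dma_mem and bus resources here...
 *		return(0);
 *	}
 */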
594 
595 
596 /*
597  * Function name:	tw_cli_init_connection
598  * Description:		Sends init_connection cmd to firmware
599  *
600  * Input:		ctlr		-- ptr to per ctlr structure
601  *			message_credits	-- max # of requests that we might send
602  *					 down simultaneously.  This will be
603  *					down simultaneously.  This will
604  *					typically be set to 256 at init-time or
605  *					after a reset, and to 1 at shutdown-time
606  *			set_features	-- indicates whether we intend to use
607  *					64-bit SG addresses, and whether we want
608  *					a basic or an extended init_connection
609  * Note: The following input/output parameters are valid, only in case of an
610  *		extended init_connection:
611  *
612  *			current_fw_srl		-- srl of fw we are bundled
613  *						with, if any; 0 otherwise
614  *			current_fw_arch_id	-- arch_id of fw we are bundled
615  *						with, if any; 0 otherwise
616  *			current_fw_branch	-- branch # of fw we are bundled
617  *						with, if any; 0 otherwise
618  *			current_fw_build	-- build # of fw we are bundled
619  *						with, if any; 0 otherwise
620  * Output:		fw_on_ctlr_srl		-- srl of fw on ctlr
621  *			fw_on_ctlr_arch_id	-- arch_id of fw on ctlr
622  *			fw_on_ctlr_branch	-- branch # of fw on ctlr
623  *			fw_on_ctlr_build	-- build # of fw on ctlr
624  *			init_connect_result	-- result bitmap of fw response
625  * Return value:	0	-- success
626  *			non-zero-- failure
627  */
628 TW_INT32
629 tw_cli_init_connection(struct tw_cli_ctlr_context *ctlr,
630 	TW_UINT16 message_credits, TW_UINT32 set_features,
631 	TW_UINT16 current_fw_srl, TW_UINT16 current_fw_arch_id,
632 	TW_UINT16 current_fw_branch, TW_UINT16 current_fw_build,
633 	TW_UINT16 *fw_on_ctlr_srl, TW_UINT16 *fw_on_ctlr_arch_id,
634 	TW_UINT16 *fw_on_ctlr_branch, TW_UINT16 *fw_on_ctlr_build,
635 	TW_UINT32 *init_connect_result)
636 {
637 	struct tw_cli_req_context		*req;
638 	struct tw_cl_command_init_connect	*init_connect;
639 	TW_INT32				error = TW_OSL_EBUSY;
640 
641 	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
642 
643 	/* Get a request packet. */
644 	if ((req = tw_cli_get_request(ctlr
645 		)) == TW_CL_NULL)
646 		goto out;
647 
648 	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
649 
650 	/* Build the cmd pkt. */
651 	init_connect = &(req->cmd_pkt->command.cmd_pkt_7k.init_connect);
652 
653 	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
654 
655 	init_connect->res1__opcode =
656 		BUILD_RES__OPCODE(0, TWA_FW_CMD_INIT_CONNECTION);
657 	init_connect->request_id =
658 		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
659 	init_connect->message_credits = TW_CL_SWAP16(message_credits);
660 	init_connect->features = TW_CL_SWAP32(set_features);
661 	if (ctlr->flags & TW_CL_64BIT_ADDRESSES)
662 		init_connect->features |= TW_CL_SWAP32(TWA_64BIT_SG_ADDRESSES);
663 	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
664 		/*
665 		 * Fill in the extra fields needed for an extended
666 		 * init_connect.
667 		 */
668 		init_connect->size = 6;
669 		init_connect->fw_srl = TW_CL_SWAP16(current_fw_srl);
670 		init_connect->fw_arch_id = TW_CL_SWAP16(current_fw_arch_id);
671 		init_connect->fw_branch = TW_CL_SWAP16(current_fw_branch);
672 		init_connect->fw_build = TW_CL_SWAP16(current_fw_build);
673 	} else
674 		init_connect->size = 3;
675 
676 	/* Submit the command, and wait for it to complete. */
677 	error = tw_cli_submit_and_poll_request(req,
678 		TW_CLI_REQUEST_TIMEOUT_PERIOD);
679 	if (error)
680 		goto out;
681 	if ((error = init_connect->status)) {
682 #if       0
683 		tw_cli_create_ctlr_event(ctlr,
684 			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
685 			&(req->cmd_pkt->cmd_hdr));
686 #endif // 0
687 		goto out;
688 	}
689 	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
690 		*fw_on_ctlr_srl = TW_CL_SWAP16(init_connect->fw_srl);
691 		*fw_on_ctlr_arch_id = TW_CL_SWAP16(init_connect->fw_arch_id);
692 		*fw_on_ctlr_branch = TW_CL_SWAP16(init_connect->fw_branch);
693 		*fw_on_ctlr_build = TW_CL_SWAP16(init_connect->fw_build);
694 		*init_connect_result = TW_CL_SWAP32(init_connect->result);
695 	}
696 	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
697 	return(error);
698 
699 out:
700 	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
701 		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
702 		0x1016, 0x1, TW_CL_SEVERITY_ERROR_STRING,
703 		"init_connection failed",
704 		"error = %d", error);
705 	if (req)
706 		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
707 	return(error);
708 }
709