/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: head/sys/dev/twa/tw_cl_io.c 212008 2010-08-30 19:15:04Z delphij $
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * Common Layer I/O functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_xpt_sim.h>


/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	struct tw_cl_command_9k			*cmd;
	struct tw_cl_scsi_req_packet		*scsi_req;
	TW_INT32				error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	/*
	 * If working with a firmware version that does not support multiple
	 * luns, and this request is directed at a non-zero lun, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

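	/* The command packet header occupies 128 bytes. */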
	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

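	/*
	 * Fill in the SG list: either let the OSL build it directly into
	 * the command packet via the callback, or copy the SG list the OSL
	 * has already built.
	 */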
	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

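	/*
	 * If other requests are already pending, or a reset is in progress,
	 * queue this request and unmask the command interrupt so it gets
	 * submitted later; otherwise, try to submit it now.
	 */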
	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
		(ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller cmd queue. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* For the 9650SE and 9690SA, write the low 4 bytes first. */
	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
		tw_osl_write_reg(ctlr_handle,
				 TWA_COMMAND_QUEUE_OFFSET_LOW,
				 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);

	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		struct tw_cl_req_packet	*req_pkt =
			(struct tw_cl_req_packet *)(req->orig_req);

		tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
			"Cmd queue full");

		if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL) ||
		    ((req_pkt) &&
		    (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))) {
			if (req->state != TW_CLI_REQ_STATE_PENDING) {
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"pending internal/ioctl request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
				/* Unmask command interrupt. */
				TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
					TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
			} else
				error = TW_OSL_EBUSY;
		} else {
			error = TW_OSL_EBUSY;
		}
	} else {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Submitting command");

		/* Insert command into busy queue */
		req->state = TW_CLI_REQ_STATE_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

		if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
		    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
			/* Now write the high 4 bytes */
			tw_osl_write_reg(ctlr_handle,
					 TWA_COMMAND_QUEUE_OFFSET_HIGH,
					 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
		} else {
			if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
				/* First write the low 4 bytes, then the high 4. */
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_LOW,
						 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_HIGH,
						 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
			} else
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET,
						 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
		}
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	return(error);
}



/*
 * Function name:	tw_cl_fw_passthru
 * Description:		Interface to OS Layer for accepting firmware
 *			passthru requests.
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	union tw_cl_command_7k			*cmd_7k;
	struct tw_cl_command_9k			*cmd_9k;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_UINT8				opcode;
	TW_UINT8				sgl_offset;
	TW_VOID					*sgl = TW_CL_NULL;
	TW_INT32				error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_PASSTHRU;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);

	tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
		pt_req->cmd_pkt_length);
	/* Build the cmd pkt. */
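	/*
	 * The opcode lives in the first byte past the command packet header.
	 * EXECUTE_SCSI commands use the 9K packet format; all other commands
	 * use the 7K (legacy) format.
	 */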
	if ((opcode = GET_OPCODE(((TW_UINT8 *)
		(pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
			== TWA_FW_CMD_EXECUTE_SCSI) {
		TW_UINT16	lun_l4, lun_h4;

		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 9k cmd pkt");
		req->flags |= TW_CLI_REQ_FLAGS_9K;
		cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
		lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
		lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
		cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
			BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
		if (pt_req->sgl_entries) {
			cmd_9k->lun_h4__sgl_entries =
				TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
					pt_req->sgl_entries));
			sgl = (TW_VOID *)(cmd_9k->sg_list);
		}
	} else {
		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 7k cmd pkt");
		cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
		cmd_7k->generic.request_id =
			(TW_UINT8)(TW_CL_SWAP16(req->request_id));
		if ((sgl_offset =
			GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
			if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
				sgl = (((TW_UINT32 *)cmd_7k) + cmd_7k->generic.size);
			else
				sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
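			/* Each SG descriptor is 3 words (64-bit) or 2 words (32-bit). */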
			cmd_7k->generic.size += pt_req->sgl_entries *
				((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
		}
	}

	if (sgl)
		tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
			sgl, pt_req->sgl_entries);

	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
		(ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start passthru command",
			"error = %d", error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cl_ioctl
 * Description:		Handler of CL supported ioctl cmds.
 *
 * Input:		ctlr_handle	-- controller handle
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   need to be copied to the output buffer in
 *				   user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, u_long cmd, TW_VOID *buf)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct tw_cl_ioctl_packet	*user_buf =
		(struct tw_cl_ioctl_packet *)buf;
	struct tw_cl_event_packet	event_buf;
	TW_INT32			event_index;
	TW_INT32			start_index;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the AEN queue and the ioctl lock. */
	tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

	switch (cmd) {
	case TW_CL_IOCTL_GET_FIRST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get First Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LAST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Last Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
		}
		event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
			ctlr->max_aens_supported;

		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_NEXT_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"Get Next Event: wrapped");
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: overflow");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				tw_cli_dbg_printf(3, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: empty queue");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

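		/*
		 * Find the event whose sequence id immediately follows the
		 * one the caller passed in, using modulo arithmetic on the
		 * circular AEN queue.
		 */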
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id + 1) %
			ctlr->max_aens_supported;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event: si = %x, ei = %x, ebsi = %x, "
			"sisi = %x, eisi = %x",
			start_index, event_index, event_buf.sequence_id,
			ctlr->aen_queue[start_index].sequence_id,
			ctlr->aen_queue[event_index].sequence_id);

		if (! (ctlr->aen_queue[event_index].sequence_id >
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Previous Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

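		/*
		 * Find the event whose sequence id immediately precedes the
		 * one the caller passed in, again via modulo arithmetic on
		 * the circular AEN queue.
		 */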
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id - 1) %
			ctlr->max_aens_supported;

		if (! (ctlr->aen_queue[event_index].sequence_id <
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LOCK:
	{
		struct tw_cl_lock_packet	lock_pkt;
		TW_TIME				cur_time;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get ioctl lock");

		cur_time = tw_osl_get_local_time();
		tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
			sizeof(struct tw_cl_lock_packet));

		if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
			(lock_pkt.force_flag) ||
			(cur_time >= ctlr->ioctl_lock.timeout)) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Getting lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
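			/*
			 * The lock timeout appears to be tracked in seconds,
			 * hence the conversion from msec.
			 */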
			ctlr->ioctl_lock.timeout =
				cur_time + (lock_pkt.timeout_msec / 1000);
			lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
			user_buf->driver_pkt.status = 0;
		} else {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Lock already held!");
			lock_pkt.time_remaining_msec = (TW_UINT32)(
				(ctlr->ioctl_lock.timeout - cur_time) * 1000);
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
		}
		tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
			sizeof(struct tw_cl_lock_packet));
		break;
	}


	case TW_CL_IOCTL_RELEASE_LOCK:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Release ioctl lock");

		if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"twa_ioctl: RELEASE_LOCK: Lock not held!");
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
		} else {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"RELEASE_LOCK: Releasing lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
			user_buf->driver_pkt.status = 0;
		}
		break;


	case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
	{
		struct tw_cl_compatibility_packet	comp_pkt;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get compatibility info");

		tw_osl_memcpy(comp_pkt.driver_version,
			TW_OSL_DRIVER_VERSION_STRING,
			sizeof(TW_OSL_DRIVER_VERSION_STRING));
		comp_pkt.working_srl = ctlr->working_srl;
		comp_pkt.working_branch = ctlr->working_branch;
		comp_pkt.working_build = ctlr->working_build;
		comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
		comp_pkt.driver_branch_high =
			TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
		comp_pkt.driver_build_high =
			TWA_CURRENT_FW_BUILD(ctlr->arch_id);
		comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
		comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
		comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
		comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
		comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
		comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
		user_buf->driver_pkt.status = 0;

		/* Copy compatibility information to user space. */
		tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
			(sizeof(struct tw_cl_compatibility_packet) <
			user_buf->driver_pkt.buffer_length) ?
			sizeof(struct tw_cl_compatibility_packet) :
			user_buf->driver_pkt.buffer_length);
		break;
	}

	default:
		/* Unknown ioctl cmd. */
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Unknown ioctl cmd 0x%lx", cmd);
		error = TW_OSL_ENOTTY;
	}

	tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
	return(error);
}



/*
 * Function name:	tw_cli_get_param
 * Description:		Get a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		param_data	-- param value
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to read data into. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

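	/*
	 * cmd->param.size is in 32-bit words: 2 for the fixed part of the
	 * command, plus 3 words for a 64-bit SG descriptor, or 2 words for
	 * a 32-bit one.
	 */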
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we need. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
				TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif /* 0 */
			goto out;
		}
		tw_osl_memcpy(param_data, param->data, param_size);
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(0);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"get_param failed",
		"error = %d", error);
	if (param)
		ctlr->internal_req_busy = TW_CL_FALSE;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(1);
}



/*
 * Function name:	tw_cli_set_param
 * Description:		Set a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			data		-- ptr to the parameter value
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to send data using. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

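	/*
	 * As in tw_cli_get_param(), cmd->param.size is in 32-bit words:
	 * 2 for the fixed part of the command, plus 3 (64-bit SG
	 * descriptor) or 2 (32-bit SG descriptor).
	 */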
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we want to set. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);
	tw_osl_memcpy(param->data, data, param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
				TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif /* 0 */
			goto out;
		}
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"set_param failed",
		"error = %d", error);
	if (param)
		ctlr->internal_req_busy = TW_CL_FALSE;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req	-- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
	TW_UINT32 timeout)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_TIME				end_time;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the cmd queue is full, tw_cli_submit_cmd will queue this
	 * request in the pending queue, since this is an internal request.
	 */
	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start internal request",
			"error = %d", error);
		return(error);
	}

	/*
	 * Poll for the response until the command gets completed, or there's
	 * a timeout.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	do {
		if ((error = req->error_code))
			/*
			 * This will take care of completion due to a reset,
			 * or a failure in tw_cli_submit_pending_queue.
			 * The caller should do the clean-up.
			 */
			return(error);

		/* See if the command completed. */
		tw_cli_process_resp_intr(ctlr);

		if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
			(req->state != TW_CLI_REQ_STATE_PENDING))
			return(req->state != TW_CLI_REQ_STATE_COMPLETE);
	} while (tw_osl_get_local_time() <= end_time);

	/* Time out! */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"Internal request timed out",
		"request = %p", req);

	/*
	 * We will reset the controller only if the request has already been
	 * submitted, so as to not lose the request packet.  If a busy request
	 * timed out, the reset will take care of freeing resources.  If a
	 * pending request timed out, we will free resources for that request,
	 * right here, thereby avoiding a reset.  So, the caller is expected
	 * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
	 */

	/*
	 * We have to make sure that this timed out request, if it were in the
	 * pending queue, doesn't get submitted while we are here, from
	 * tw_cli_submit_pending_queue.  There could be a race in that case.
	 * Need to revisit.
	 */
	if (req->state == TW_CLI_REQ_STATE_PENDING) {
		tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Removing request from pending queue");
		/*
		 * Request was never submitted.  Clean up.  Note that we did
		 * not do a reset.  So, we have to remove the request ourselves
		 * from the pending queue (as against tw_cli_drain_pending_queue
		 * taking care of it).
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
		if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL)
			TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
				TWA_CONTROL_MASK_COMMAND_INTERRUPT);
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	return(TW_OSL_ETIMEDOUT);
}



/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr_handle	-- controller handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	struct tw_cli_req_context	*req;
	TW_INT32			reset_attempt = 1;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->reset_in_progress = TW_CL_TRUE;
	twa_teardown_intr(sc);

	/*
	 * Error back all requests in the complete, busy, and pending queues.
	 * If any request is already on its way to getting submitted, it's in
	 * none of these queues and so will not be completed.  That request
	 * will continue its course and get submitted to the controller after
	 * the reset is done (and io_lock is released).
	 */
	tw_cli_drain_complete_queue(ctlr);
	tw_cli_drain_busy_queue(ctlr);
	tw_cli_drain_pending_queue(ctlr);
	ctlr->internal_req_busy = TW_CL_FALSE;
	ctlr->get_more_aens     = TW_CL_FALSE;

	/* Soft reset the controller. */
	while (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS) {
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller reset failed",
				"error = %d; attempt %d", error, reset_attempt);
			reset_attempt++;
			continue;
		}

		/* Re-establish logical connection with the controller. */
		if ((error = tw_cli_init_connection(ctlr,
				(TW_UINT16)(ctlr->max_simult_reqs),
				0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
				TW_CL_NULL, TW_CL_NULL))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Can't initialize connection after reset",
				"error = %d", error);
			reset_attempt++;
			continue;
		}

#ifdef TW_OSL_DEBUG
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
			"Controller reset done!", " ");
#endif /* TW_OSL_DEBUG */
		break;
	} /* End of while */

	/* Move commands from the reset queue to the pending queue. */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_RESET_Q)) != TW_CL_NULL) {
		tw_osl_timeout(req->req_handle);
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
	}

	twa_setup_intr(sc);
	tw_cli_enable_interrupts(ctlr);
	if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL)
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	ctlr->reset_in_progress = TW_CL_FALSE;
	ctlr->reset_needed = TW_CL_FALSE;

	/* Request for a bus re-scan. */
	tw_osl_scan_bus(ctlr_handle);

	return(error);
}

TW_VOID
tw_cl_set_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	ctlr->reset_needed = TW_CL_TRUE;
}

TW_INT32
tw_cl_is_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->reset_needed);
}

TW_INT32
tw_cl_is_active(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)
		(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->active);
}



/*
 * Function name:	tw_cli_soft_reset
 * Description:		Does the actual soft reset.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	int				found;
	int				loop_count;
	TW_UINT32			error;

	tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Resetting controller...",
		" ");

	/* Don't let any new commands get submitted to the controller. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	TW_CLI_SOFT_RESET(ctlr_handle);

	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
		/*
		 * There's a hardware bug in the G133 ASIC, which can lead to
		 * PCI parity errors and hangs, if the host accesses any
		 * registers when the firmware is resetting the hardware, as
		 * part of a hard/soft reset.  The window of time when the
		 * problem can occur is about 10 ms.  Here, we will handshake
		 * with the firmware to find out when the firmware is pulling
		 * down the hardware reset pin, and wait for about 500 ms to
		 * make sure we don't access any hardware registers (for
		 * polling) during that window.
		 */
		ctlr->reset_phase1_in_progress = TW_CL_TRUE;
		loop_count = 0;
		do {
			found = (tw_cli_find_response(ctlr, TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) == TW_OSL_ESUCCESS);
			tw_osl_delay(10);
			loop_count++;
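			/* Sentinel error code, reported below if the handshake never arrives. */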
			error = 0x7888;
		} while (!found && (loop_count < 6000000)); /* Loop for no more than 60 seconds */

		if (!found) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Missed firmware handshake after soft-reset",
				"error = %d", error);
			tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
			return(error);
		}

		tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
		ctlr->reset_phase1_in_progress = TW_CL_FALSE;
	}

	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY |
			TWA_STATUS_ATTENTION_INTERRUPT,
			TW_CLI_RESET_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Micro-ctlr not ready/No attn intr after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);

	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if ((error = tw_cli_drain_aen_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain AEN queue after reset",
			"error = %d", error);
		return(error);
	}

	if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Reset not reported by controller",
			"error = %d", error);
		return(error);
	}

	return(TW_OSL_ESUCCESS);
}



/*
 * Function name:	tw_cli_send_scsi_cmd
 * Description:		Sends down a scsi cmd to fw.
 *
 * Input:		req	-- ptr to request pkt
 *			cmd	-- opcode of scsi cmd to send
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
{
	struct tw_cl_command_packet	*cmdpkt;
	struct tw_cl_command_9k		*cmd9k;
	struct tw_cli_ctlr_context	*ctlr;
	TW_INT32			error;

	ctlr = req->ctlr;
	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy)
		return(TW_OSL_EBUSY);
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
	req->length = TW_CLI_SECTOR_SIZE;

	/* Build the cmd pkt. */
	cmdpkt = req->cmd_pkt;

	cmdpkt->cmd_hdr.header_desc.size_header = 128;

	cmd9k = &(cmdpkt->command.cmd_pkt_9k);

	cmd9k->res__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd9k->unit = 0;
	cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
	cmd9k->status = 0;
	cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);

	if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	} else {
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	}

	cmd9k->cdb[0] = (TW_UINT8)cmd;
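	/* Byte 4 of a 6-byte CDB is the allocation length. */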
	cmd9k->cdb[4] = 128;

	if ((error = tw_cli_submit_cmd(req))) {
		if (error != TW_OSL_EBUSY) {
			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
				tw_osl_cur_func(),
				"Failed to start SCSI command "
				"(request = %p, error = %d)", req, error);
			return(TW_OSL_EIO);
		}
	}
	return(TW_OSL_ESUCCESS);
}



/*
 * Function name:	tw_cli_get_aen
 * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;
	req->tw_cli_callback = tw_cli_aen_callback;
	if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
		tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Could not send SCSI command "
			"(request = %p, error = %d)", req, error);
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cli_fill_sg_list
 * Description:		Fills in the scatter/gather list.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			sgl_src	-- ptr to fill the sg list from
 *			sgl_dest-- ptr to sg list
 *			num_sgl_entries -- # of SG list entries
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
	TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
{
	TW_INT32	i;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		struct tw_cl_sg_desc64 *sgl_s =
			(struct tw_cl_sg_desc64 *)sgl_src;
		struct tw_cl_sg_desc64 *sgl_d =
			(struct tw_cl_sg_desc64 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"64 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
			sgl_s++;
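			/*
			 * Source descriptors are presumably 4 bytes larger
			 * when the OSL uses 64-bit SG lengths; skip the pad.
			 */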
			if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
				sgl_s = (struct tw_cl_sg_desc64 *)
					(((TW_INT8 *)(sgl_s)) + 4);
		}
	} else {
		struct tw_cl_sg_desc32 *sgl_s =
			(struct tw_cl_sg_desc32 *)sgl_src;
		struct tw_cl_sg_desc32 *sgl_d =
			(struct tw_cl_sg_desc32 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"32 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
		}
	}
}
1439