1 /*
2  * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3  * i.e. Thunderbolt and Invader
4  *
5  * Solaris MegaRAID device driver for SAS2.0 controllers
6  * Copyright (c) 2008-2012, LSI Logic Corporation.
7  * All rights reserved.
8  *
9  * Version:
10  * Author:
11  *		Swaminathan K S
12  *		Arun Chandrashekhar
13  *		Manju R
14  *		Rasheed
15  *		Shakeel Bukhari
16  */
17 
18 /*
19  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
20  * Copyright 2015 Citrus IT Limited. All rights reserved.
21  */
22 
23 
24 #include <sys/types.h>
25 #include <sys/file.h>
26 #include <sys/atomic.h>
27 #include <sys/scsi/scsi.h>
28 #include <sys/byteorder.h>
29 #include <sys/sdt.h>
30 #include "ld_pd_map.h"
31 #include "mr_sas.h"
32 #include "fusion.h"
33 
34 /*
35  * FMA header files
36  */
37 #include <sys/ddifm.h>
38 #include <sys/fm/protocol.h>
39 #include <sys/fm/util.h>
40 #include <sys/fm/io/ddi.h>
41 
42 
43 /* Pre-TB command size and TB command size. */
44 #define	MR_COMMAND_SIZE (64*20)	/* 1280 bytes */
45 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
46 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
47 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
48 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
49 extern ddi_dma_attr_t mrsas_generic_dma_attr;
50 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
51 extern struct ddi_device_acc_attr endian_attr;
52 extern int	debug_level_g;
53 extern unsigned int	enable_fp;
54 volatile int dump_io_wait_time = 90;
55 extern volatile int  debug_timeout_g;
56 extern int	mrsas_issue_pending_cmds(struct mrsas_instance *);
57 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
58 extern void	push_pending_mfi_pkt(struct mrsas_instance *,
59 			struct mrsas_cmd *);
60 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
61 	    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
62 
63 /* Local static prototypes. */
64 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
65     struct scsi_address *, struct scsi_pkt *, uchar_t *);
66 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
67     U64 start_blk, U32 num_blocks);
68 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
69 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
70 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
71 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
72 #ifdef PDSUPPORT
73 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
74     struct mrsas_tbolt_pd_info *, int);
75 #endif /* PDSUPPORT */
76 
77 static int debug_tbolt_fw_faults_after_ocr_g = 0;
78 
79 /*
80  * destroy_mfi_mpi_frame_pool
81  */
82 void
83 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
84 {
85 	int	i;
86 
87 	struct mrsas_cmd	*cmd;
88 
89 	/* return all mfi frames to pool */
90 	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
91 		cmd = instance->cmd_list[i];
92 		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
93 			(void) mrsas_free_dma_obj(instance,
94 			    cmd->frame_dma_obj);
95 		}
96 		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
97 	}
98 }
99 
100 /*
101  * destroy_mpi2_frame_pool
102  */
103 void
104 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
105 {
106 
107 	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
108 		(void) mrsas_free_dma_obj(instance,
109 		    instance->mpi2_frame_pool_dma_obj);
110 		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
111 	}
112 }
113 
114 
115 /*
116  * mrsas_tbolt_free_additional_dma_buffer
117  */
118 void
119 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
120 {
121 	int i;
122 
123 	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
124 		(void) mrsas_free_dma_obj(instance,
125 		    instance->mfi_internal_dma_obj);
126 		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
127 	}
128 	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
129 		(void) mrsas_free_dma_obj(instance,
130 		    instance->mfi_evt_detail_obj);
131 		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
132 	}
133 
134 	for (i = 0; i < 2; i++) {
135 		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
136 			(void) mrsas_free_dma_obj(instance,
137 			    instance->ld_map_obj[i]);
138 			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
139 		}
140 	}
141 }
142 
143 
144 /*
145  * free_req_desc_pool
146  */
147 void
148 free_req_rep_desc_pool(struct mrsas_instance *instance)
149 {
150 	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
151 		(void) mrsas_free_dma_obj(instance,
152 		    instance->request_desc_dma_obj);
153 		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
154 	}
155 
156 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
157 		(void) mrsas_free_dma_obj(instance,
158 		    instance->reply_desc_dma_obj);
159 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
160 	}
161 
162 
163 }
164 
165 
166 /*
167  * ThunderBolt(TB) Request Message Frame Pool
168  */
169 int
170 create_mpi2_frame_pool(struct mrsas_instance *instance)
171 {
172 	int		i = 0;
173 	uint16_t	max_cmd;
174 	uint32_t	sgl_sz;
175 	uint32_t	raid_msg_size;
176 	uint32_t	total_size;
177 	uint32_t	offset;
178 	uint32_t	io_req_base_phys;
179 	uint8_t		*io_req_base;
180 	struct mrsas_cmd	*cmd;
181 
182 	max_cmd = instance->max_fw_cmds;
183 
184 	sgl_sz		= 1024;
185 	raid_msg_size	= MRSAS_THUNDERBOLT_MSG_SIZE;
186 
187 	/* Allocating additional 256 bytes to accomodate SMID 0. */
188 	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
189 	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
190 
191 	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
192 	    "max_cmd %x", max_cmd));
193 
194 	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
195 	    "request message frame pool size %x", total_size));
196 
197 	/*
198 	 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
199 	 * and then split the memory to 1024 commands. Each command should be
200 	 * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
201 	 * within it. Further refer the "alloc_req_rep_desc" function where
202 	 * we allocate request/reply descriptors queues for a clue.
203 	 */
204 
205 	instance->mpi2_frame_pool_dma_obj.size = total_size;
206 	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
207 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
208 	    0xFFFFFFFFU;
209 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
210 	    0xFFFFFFFFU;
211 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
212 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
213 
214 	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
215 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
216 		dev_err(instance->dip, CE_WARN,
217 		    "could not alloc mpi2 frame pool");
218 		return (DDI_FAILURE);
219 	}
220 
221 	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
222 	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
223 
224 	instance->io_request_frames =
225 	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
226 	instance->io_request_frames_phy =
227 	    (uint32_t)
228 	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
229 
230 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
231 	    (void *)instance->io_request_frames));
232 
233 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
234 	    instance->io_request_frames_phy));
235 
236 	io_req_base = (uint8_t *)instance->io_request_frames +
237 	    MRSAS_THUNDERBOLT_MSG_SIZE;
238 	io_req_base_phys = instance->io_request_frames_phy +
239 	    MRSAS_THUNDERBOLT_MSG_SIZE;
240 
241 	con_log(CL_DLEVEL3, (CE_NOTE,
242 	    "io req_base_phys 0x%x", io_req_base_phys));
243 
244 	for (i = 0; i < max_cmd; i++) {
245 		cmd = instance->cmd_list[i];
246 
247 		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
248 
249 		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
250 		    ((uint8_t *)io_req_base + offset);
251 		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
252 
253 		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
254 		    (max_cmd * raid_msg_size) + i * sgl_sz);
255 
256 		cmd->sgl_phys_addr = (io_req_base_phys +
257 		    (max_cmd * raid_msg_size) + i * sgl_sz);
258 
259 		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
260 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
261 		    (i * SENSE_LENGTH));
262 
263 		cmd->sense_phys_addr1 = (io_req_base_phys +
264 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
265 		    (i * SENSE_LENGTH));
266 
267 
268 		cmd->SMID = i + 1;
269 
270 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
271 		    cmd->index, (void *)cmd->scsi_io_request));
272 
273 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
274 		    cmd->index, cmd->scsi_io_request_phys_addr));
275 
276 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
277 		    cmd->index, (void *)cmd->sense1));
278 
279 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
280 		    cmd->index, cmd->sense_phys_addr1));
281 
282 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
283 		    cmd->index, (void *)cmd->sgl));
284 
285 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
286 		    cmd->index, cmd->sgl_phys_addr));
287 	}
288 
289 	return (DDI_SUCCESS);
290 
291 }
292 
293 
294 /*
295  * alloc_additional_dma_buffer for AEN
296  */
297 int
298 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
299 {
300 	uint32_t	internal_buf_size = PAGESIZE*2;
301 	int i;
302 
303 	/* Initialize buffer status as free */
304 	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
305 	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
306 	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
307 	instance->ld_map_obj[1].status = DMA_OBJ_FREED;
308 
309 
310 	instance->mfi_internal_dma_obj.size = internal_buf_size;
311 	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
312 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
313 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
314 	    0xFFFFFFFFU;
315 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
316 
317 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
318 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
319 		dev_err(instance->dip, CE_WARN,
320 		    "could not alloc reply queue");
321 		return (DDI_FAILURE);
322 	}
323 
324 	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
325 
326 	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
327 	instance->internal_buf =
328 	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
329 	instance->internal_buf_dmac_add =
330 	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
331 	instance->internal_buf_size = internal_buf_size;
332 
333 	/* allocate evt_detail */
334 	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
335 	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
336 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
337 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
338 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
339 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
340 
341 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
342 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
343 		dev_err(instance->dip, CE_WARN,
344 		    "mrsas_tbolt_alloc_additional_dma_buffer: "
345 		    "could not allocate data transfer buffer.");
346 		goto fail_tbolt_additional_buff;
347 	}
348 
349 	bzero(instance->mfi_evt_detail_obj.buffer,
350 	    sizeof (struct mrsas_evt_detail));
351 
352 	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
353 
354 	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
355 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
356 
357 	for (i = 0; i < 2; i++) {
358 		/* allocate the data transfer buffer */
359 		instance->ld_map_obj[i].size = instance->size_map_info;
360 		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
361 		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
362 		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
363 		    0xFFFFFFFFU;
364 		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
365 		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
366 
367 		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
368 		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
369 			dev_err(instance->dip, CE_WARN,
370 			    "could not allocate data transfer buffer.");
371 			goto fail_tbolt_additional_buff;
372 		}
373 
374 		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
375 
376 		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
377 
378 		instance->ld_map[i] =
379 		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
380 		instance->ld_map_phy[i] = (uint32_t)instance->
381 		    ld_map_obj[i].dma_cookie[0].dmac_address;
382 
383 		con_log(CL_DLEVEL3, (CE_NOTE,
384 		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
385 
386 		con_log(CL_DLEVEL3, (CE_NOTE,
387 		    "size_map_info 0x%x", instance->size_map_info));
388 	}
389 
390 	return (DDI_SUCCESS);
391 
392 fail_tbolt_additional_buff:
393 	mrsas_tbolt_free_additional_dma_buffer(instance);
394 
395 	return (DDI_FAILURE);
396 }
397 
398 MRSAS_REQUEST_DESCRIPTOR_UNION *
399 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
400 {
401 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
402 
403 	if (index > instance->max_fw_cmds) {
404 		con_log(CL_ANN1, (CE_NOTE,
405 		    "Invalid SMID 0x%x request for descriptor", index));
406 		con_log(CL_ANN1, (CE_NOTE,
407 		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
408 		return (NULL);
409 	}
410 
411 	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
412 	    ((char *)instance->request_message_pool +
413 	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
414 
415 	con_log(CL_ANN1, (CE_NOTE,
416 	    "request descriptor : 0x%08lx", (unsigned long)req_desc));
417 
418 	con_log(CL_ANN1, (CE_NOTE,
419 	    "request descriptor base phy : 0x%08lx",
420 	    (unsigned long)instance->request_message_pool_phy));
421 
422 	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
423 }
424 
425 
426 /*
427  * Allocate Request and Reply  Queue Descriptors.
428  */
429 int
430 alloc_req_rep_desc(struct mrsas_instance *instance)
431 {
432 	uint32_t	request_q_sz, reply_q_sz;
433 	int		i, max_reply_q_sz;
434 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
435 
436 	/*
437 	 * ThunderBolt(TB) There's no longer producer consumer mechanism.
438 	 * Once we have an interrupt we are supposed to scan through the list of
439 	 * reply descriptors and process them accordingly. We would be needing
440 	 * to allocate memory for 1024 reply descriptors
441 	 */
442 
443 	/* Allocate Reply Descriptors */
444 	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
445 	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
446 
447 	/* reply queue size should be multiple of 16 */
448 	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
449 
450 	reply_q_sz = 8 * max_reply_q_sz;
451 
452 
453 	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
454 	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
455 
456 	instance->reply_desc_dma_obj.size = reply_q_sz;
457 	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
458 	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
459 	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
460 	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
461 	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
462 
463 	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
464 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
465 		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
466 		return (DDI_FAILURE);
467 	}
468 
469 	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
470 	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
471 
472 	/* virtual address of  reply queue */
473 	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
474 	    instance->reply_desc_dma_obj.buffer);
475 
476 	instance->reply_q_depth = max_reply_q_sz;
477 
478 	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
479 	    instance->reply_q_depth));
480 
481 	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
482 	    (void *)instance->reply_frame_pool));
483 
484 	/* initializing reply address to 0xFFFFFFFF */
485 	reply_desc = instance->reply_frame_pool;
486 
487 	for (i = 0; i < instance->reply_q_depth; i++) {
488 		reply_desc->Words = (uint64_t)~0;
489 		reply_desc++;
490 	}
491 
492 
493 	instance->reply_frame_pool_phy =
494 	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
495 
496 	con_log(CL_ANN1, (CE_NOTE,
497 	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
498 
499 
500 	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
501 	    reply_q_sz);
502 
503 	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
504 	    instance->reply_pool_limit_phy));
505 
506 
507 	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
508 	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
509 
510 	/* Allocate Request Descriptors */
511 	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
512 	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
513 
514 	request_q_sz = 8 *
515 	    (instance->max_fw_cmds);
516 
517 	instance->request_desc_dma_obj.size = request_q_sz;
518 	instance->request_desc_dma_obj.dma_attr	= mrsas_generic_dma_attr;
519 	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
520 	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
521 	    0xFFFFFFFFU;
522 	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen	= 1;
523 	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
524 
525 	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
526 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
527 		dev_err(instance->dip, CE_WARN,
528 		    "could not alloc request queue desc");
529 		goto fail_undo_reply_queue;
530 	}
531 
532 	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
533 	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
534 
535 	/* virtual address of  request queue desc */
536 	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
537 	    (instance->request_desc_dma_obj.buffer);
538 
539 	instance->request_message_pool_phy =
540 	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
541 
542 	return (DDI_SUCCESS);
543 
544 fail_undo_reply_queue:
545 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
546 		(void) mrsas_free_dma_obj(instance,
547 		    instance->reply_desc_dma_obj);
548 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
549 	}
550 
551 	return (DDI_FAILURE);
552 }
553 
554 /*
555  * mrsas_alloc_cmd_pool_tbolt
556  *
557  * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
558  * routine
559  */
560 int
561 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
562 {
563 	int		i;
564 	int		count;
565 	uint32_t	max_cmd;
566 	uint32_t	reserve_cmd;
567 	size_t		sz;
568 
569 	struct mrsas_cmd	*cmd;
570 
571 	max_cmd = instance->max_fw_cmds;
572 	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
573 	    "max_cmd %x", max_cmd));
574 
575 
576 	sz = sizeof (struct mrsas_cmd *) * max_cmd;
577 
578 	/*
579 	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
580 	 * Allocate the dynamic array first and then allocate individual
581 	 * commands.
582 	 */
583 	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
584 
585 	/* create a frame pool and assign one frame to each cmd */
586 	for (count = 0; count < max_cmd; count++) {
587 		instance->cmd_list[count] =
588 		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
589 	}
590 
591 	/* add all the commands to command pool */
592 
593 	INIT_LIST_HEAD(&instance->cmd_pool_list);
594 	INIT_LIST_HEAD(&instance->cmd_pend_list);
595 	INIT_LIST_HEAD(&instance->cmd_app_pool_list);
596 
597 	reserve_cmd = MRSAS_APP_RESERVED_CMDS;
598 
599 	/* cmd index 0 reservered for IOC INIT */
600 	for (i = 1; i < reserve_cmd; i++) {
601 		cmd		= instance->cmd_list[i];
602 		cmd->index	= i;
603 		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
604 	}
605 
606 
607 	for (i = reserve_cmd; i < max_cmd; i++) {
608 		cmd		= instance->cmd_list[i];
609 		cmd->index	= i;
610 		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
611 	}
612 
613 	return (DDI_SUCCESS);
614 
615 mrsas_undo_cmds:
616 	if (count > 0) {
617 		/* free each cmd */
618 		for (i = 0; i < count; i++) {
619 			if (instance->cmd_list[i] != NULL) {
620 				kmem_free(instance->cmd_list[i],
621 				    sizeof (struct mrsas_cmd));
622 			}
623 			instance->cmd_list[i] = NULL;
624 		}
625 	}
626 
627 mrsas_undo_cmd_list:
628 	if (instance->cmd_list != NULL)
629 		kmem_free(instance->cmd_list, sz);
630 	instance->cmd_list = NULL;
631 
632 	return (DDI_FAILURE);
633 }
634 
635 
636 /*
637  * free_space_for_mpi2
638  */
639 void
640 free_space_for_mpi2(struct mrsas_instance *instance)
641 {
642 	/* already freed */
643 	if (instance->cmd_list == NULL) {
644 		return;
645 	}
646 
647 	/* First free the additional DMA buffer */
648 	mrsas_tbolt_free_additional_dma_buffer(instance);
649 
650 	/* Free the request/reply descriptor pool */
651 	free_req_rep_desc_pool(instance);
652 
653 	/*  Free the MPI message pool */
654 	destroy_mpi2_frame_pool(instance);
655 
656 	/* Free the MFI frame pool */
657 	destroy_mfi_frame_pool(instance);
658 
659 	/* Free all the commands in the cmd_list */
660 	/* Free the cmd_list buffer itself */
661 	mrsas_free_cmd_pool(instance);
662 }
663 
664 
665 /*
666  * ThunderBolt(TB) memory allocations for commands/messages/frames.
667  */
668 int
669 alloc_space_for_mpi2(struct mrsas_instance *instance)
670 {
671 	/* Allocate command pool (memory for cmd_list & individual commands) */
672 	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
673 		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
674 		return (DDI_FAILURE);
675 	}
676 
677 	/* Initialize single reply size and Message size */
678 	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
679 	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
680 
681 	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
682 	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
683 	    sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
684 	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
685 	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
686 
687 	/* Reduce SG count by 1 to take care of group cmds feature in FW */
688 	instance->max_num_sge = (instance->max_sge_in_main_msg +
689 	    instance->max_sge_in_chain - 2);
690 	instance->chain_offset_mpt_msg =
691 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
692 	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
693 	    sizeof (MPI2_SGE_IO_UNION)) / 16;
694 	instance->reply_read_index = 0;
695 
696 
697 	/* Allocate Request and Reply descriptors Array */
698 	/* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
699 	if (alloc_req_rep_desc(instance)) {
700 		dev_err(instance->dip, CE_WARN,
701 		    "Error, allocating memory for descripter-pool");
702 		goto mpi2_undo_cmd_pool;
703 	}
704 	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
705 	    instance->request_message_pool_phy));
706 
707 
708 	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
709 	if (create_mfi_frame_pool(instance)) {
710 		dev_err(instance->dip, CE_WARN,
711 		    "Error, allocating memory for MFI frame-pool");
712 		goto mpi2_undo_descripter_pool;
713 	}
714 
715 
716 	/* Allocate MPI2 Message pool */
717 	/*
718 	 * Make sure the buffer is alligned to 256 for raid message packet
719 	 * create a io request pool and assign one frame to each cmd
720 	 */
721 
722 	if (create_mpi2_frame_pool(instance)) {
723 		dev_err(instance->dip, CE_WARN,
724 		    "Error, allocating memory for MPI2 Message-pool");
725 		goto mpi2_undo_mfi_frame_pool;
726 	}
727 
728 #ifdef DEBUG
729 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
730 	    instance->max_sge_in_main_msg));
731 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
732 	    instance->max_sge_in_chain));
733 	con_log(CL_ANN1, (CE_CONT,
734 	    "[max_sge]0x%x", instance->max_num_sge));
735 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
736 	    instance->chain_offset_mpt_msg));
737 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
738 	    instance->chain_offset_io_req));
739 #endif
740 
741 
742 	/* Allocate additional dma buffer */
743 	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
744 		dev_err(instance->dip, CE_WARN,
745 		    "Error, allocating tbolt additional DMA buffer");
746 		goto mpi2_undo_message_pool;
747 	}
748 
749 	return (DDI_SUCCESS);
750 
751 mpi2_undo_message_pool:
752 	destroy_mpi2_frame_pool(instance);
753 
754 mpi2_undo_mfi_frame_pool:
755 	destroy_mfi_frame_pool(instance);
756 
757 mpi2_undo_descripter_pool:
758 	free_req_rep_desc_pool(instance);
759 
760 mpi2_undo_cmd_pool:
761 	mrsas_free_cmd_pool(instance);
762 
763 	return (DDI_FAILURE);
764 }
765 
766 
767 /*
768  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
769  */
770 int
771 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
772 {
773 
774 	/*
775 	 * Reduce the max supported cmds by 1. This is to ensure that the
776 	 * reply_q_sz (1 more than the max cmd that driver may send)
777 	 * does not exceed max cmds that the FW can support
778 	 */
779 
780 	if (instance->max_fw_cmds > 1008) {
781 		instance->max_fw_cmds = 1008;
782 		instance->max_fw_cmds = instance->max_fw_cmds-1;
783 	}
784 
785 	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
786 	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
787 
788 
789 	/* create a pool of commands */
790 	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
791 		dev_err(instance->dip, CE_WARN,
792 		    "alloc_space_for_mpi2() failed.");
793 
794 		return (DDI_FAILURE);
795 	}
796 
797 	/* Send ioc init message */
798 	/* NOTE: the issue_init call does FMA checking already. */
799 	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
800 		dev_err(instance->dip, CE_WARN,
801 		    "mrsas_issue_init_mpi2() failed.");
802 
803 		goto fail_init_fusion;
804 	}
805 
806 	instance->unroll.alloc_space_mpi2 = 1;
807 
808 	con_log(CL_ANN, (CE_NOTE,
809 	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));
810 
811 	return (DDI_SUCCESS);
812 
813 fail_init_fusion:
814 	free_space_for_mpi2(instance);
815 
816 	return (DDI_FAILURE);
817 }
818 
819 
820 
821 /*
822  * init_mpi2
823  */
824 int
825 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
826 {
827 	dma_obj_t init2_dma_obj;
828 	int ret_val = DDI_SUCCESS;
829 
830 	/* allocate DMA buffer for IOC INIT message */
831 	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
832 	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
833 	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
834 	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
835 	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
836 	init2_dma_obj.dma_attr.dma_attr_align = 256;
837 
838 	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
839 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
840 		dev_err(instance->dip, CE_WARN, "mr_sas_issue_init_mpi2 "
841 		    "could not allocate data transfer buffer.");
842 		return (DDI_FAILURE);
843 	}
844 	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
845 
846 	con_log(CL_ANN1, (CE_NOTE,
847 	    "mrsas_issue_init_mpi2 _phys adr: %x",
848 	    init2_dma_obj.dma_cookie[0].dmac_address));
849 
850 
851 	/* Initialize and send ioc init message */
852 	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
853 	if (ret_val == DDI_FAILURE) {
854 		con_log(CL_ANN1, (CE_WARN,
855 		    "mrsas_issue_init_mpi2: Failed"));
856 		goto fail_init_mpi2;
857 	}
858 
859 	/* free IOC init DMA buffer */
860 	if (mrsas_free_dma_obj(instance, init2_dma_obj)
861 	    != DDI_SUCCESS) {
862 		con_log(CL_ANN1, (CE_WARN,
863 		    "mrsas_issue_init_mpi2: Free Failed"));
864 		return (DDI_FAILURE);
865 	}
866 
867 	/* Get/Check and sync ld_map info */
868 	instance->map_id = 0;
869 	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
870 		(void) mrsas_tbolt_sync_map_info(instance);
871 
872 
873 	/* No mrsas_cmd to send, so send NULL. */
874 	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
875 		goto fail_init_mpi2;
876 
877 	con_log(CL_ANN, (CE_NOTE,
878 	    "mrsas_issue_init_mpi2: SUCCESSFUL"));
879 
880 	return (DDI_SUCCESS);
881 
882 fail_init_mpi2:
883 	(void) mrsas_free_dma_obj(instance, init2_dma_obj);
884 
885 	return (DDI_FAILURE);
886 }
887 
888 static int
889 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
890 {
891 	int				numbytes;
892 	uint16_t			flags;
893 	struct mrsas_init_frame2	*mfiFrameInit2;
894 	struct mrsas_header		*frame_hdr;
895 	Mpi2IOCInitRequest_t		*init;
896 	struct mrsas_cmd		*cmd = NULL;
897 	struct mrsas_drv_ver		drv_ver_info;
898 	MRSAS_REQUEST_DESCRIPTOR_UNION	*req_desc;
899 
900 	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
901 
902 
903 #ifdef DEBUG
904 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
905 	    (int)sizeof (*mfiFrameInit2)));
906 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
907 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
908 	    (int)sizeof (struct mrsas_init_frame2)));
909 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
910 	    (int)sizeof (Mpi2IOCInitRequest_t)));
911 #endif
912 
913 	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
914 	numbytes = sizeof (*init);
915 	bzero(init, numbytes);
916 
917 	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
918 	    MPI2_FUNCTION_IOC_INIT);
919 
920 	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
921 	    MPI2_WHOINIT_HOST_DRIVER);
922 
923 	/* set MsgVersion and HeaderVersion host driver was built with */
924 	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
925 	    MPI2_VERSION);
926 
927 	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
928 	    MPI2_HEADER_VERSION);
929 
930 	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
931 	    instance->raid_io_msg_size / 4);
932 
933 	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
934 	    0);
935 
936 	ddi_put16(mpi2_dma_obj->acc_handle,
937 	    &init->ReplyDescriptorPostQueueDepth,
938 	    instance->reply_q_depth);
939 	/*
940 	 * These addresses are set using the DMA cookie addresses from when the
941 	 * memory was allocated.  Sense buffer hi address should be 0.
942 	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
943 	 */
944 
945 	ddi_put32(mpi2_dma_obj->acc_handle,
946 	    &init->SenseBufferAddressHigh, 0);
947 
948 	ddi_put64(mpi2_dma_obj->acc_handle,
949 	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
950 	    instance->io_request_frames_phy);
951 
952 	ddi_put64(mpi2_dma_obj->acc_handle,
953 	    &init->ReplyDescriptorPostQueueAddress,
954 	    instance->reply_frame_pool_phy);
955 
956 	ddi_put64(mpi2_dma_obj->acc_handle,
957 	    &init->ReplyFreeQueueAddress, 0);
958 
959 	cmd = instance->cmd_list[0];
960 	if (cmd == NULL) {
961 		return (DDI_FAILURE);
962 	}
963 	cmd->retry_count_for_ocr = 0;
964 	cmd->pkt = NULL;
965 	cmd->drv_pkt_time = 0;
966 
967 	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
968 	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
969 
970 	frame_hdr = &cmd->frame->hdr;
971 
972 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
973 	    MFI_CMD_STATUS_POLL_MODE);
974 
975 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
976 
977 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
978 
979 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
980 
981 	con_log(CL_ANN, (CE_CONT,
982 	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
983 
984 	/* Init the MFI Header */
985 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
986 	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
987 
988 	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
989 
990 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
991 	    &mfiFrameInit2->cmd_status,
992 	    MFI_STAT_INVALID_STATUS);
993 
994 	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
995 
996 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
997 	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
998 	    mpi2_dma_obj->dma_cookie[0].dmac_address);
999 
1000 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1001 	    &mfiFrameInit2->data_xfer_len,
1002 	    sizeof (Mpi2IOCInitRequest_t));
1003 
1004 	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1005 	    (int)init->ReplyDescriptorPostQueueAddress));
1006 
1007 	/* fill driver version information */
1008 	fill_up_drv_ver(&drv_ver_info);
1009 
1010 	/* allocate the driver version data transfer buffer */
1011 	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1012 	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1013 	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1014 	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1015 	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1016 	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1017 
1018 	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1019 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1020 		dev_err(instance->dip, CE_WARN,
1021 		    "fusion init: Could not allocate driver version buffer.");
1022 		return (DDI_FAILURE);
1023 	}
1024 	/* copy driver version to dma buffer */
1025 	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1026 	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1027 	    (uint8_t *)drv_ver_info.drv_ver,
1028 	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
1029 	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1030 
1031 	/* send driver version physical address to firmware */
1032 	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1033 	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1034 
1035 	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1036 	    mfiFrameInit2->queue_info_new_phys_addr_lo,
1037 	    (int)sizeof (Mpi2IOCInitRequest_t)));
1038 
1039 	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1040 
1041 	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1042 	    cmd->scsi_io_request_phys_addr,
1043 	    (int)sizeof (struct mrsas_init_frame2)));
1044 
1045 	/* disable interrupts before sending INIT2 frame */
1046 	instance->func_ptr->disable_intr(instance);
1047 
1048 	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1049 	    instance->request_message_pool;
1050 	req_desc->Words = cmd->scsi_io_request_phys_addr;
1051 	req_desc->MFAIo.RequestFlags =
1052 	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1053 
1054 	cmd->request_desc = req_desc;
1055 
1056 	/* issue the init frame */
1057 	instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1058 
1059 	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1060 	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
1061 	    frame_hdr->cmd_status));
1062 
1063 	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1064 	    &mfiFrameInit2->cmd_status) == 0) {
1065 		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1066 	} else {
1067 		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1068 		mrsas_dump_reply_desc(instance);
1069 		goto fail_ioc_init;
1070 	}
1071 
1072 	mrsas_dump_reply_desc(instance);
1073 
1074 	instance->unroll.verBuff = 1;
1075 
1076 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1077 
1078 	return (DDI_SUCCESS);
1079 
1080 
1081 fail_ioc_init:
1082 
1083 	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1084 
1085 	return (DDI_FAILURE);
1086 }
1087 
1088 int
1089 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1090 {
1091 	int i;
1092 	uint32_t wait_time = dump_io_wait_time;
1093 	for (i = 0; i < wait_time; i++) {
1094 		/*
1095 		 * Check For Outstanding poll Commands
1096 		 * except ldsync command and aen command
1097 		 */
1098 		if (instance->fw_outstanding <= 2) {
1099 			break;
1100 		}
1101 		drv_usecwait(10*MILLISEC);
1102 		/* complete commands from reply queue */
1103 		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
1104 	}
1105 	if (instance->fw_outstanding > 2) {
1106 		return (1);
1107 	}
1108 	return (0);
1109 }
1110 /*
1111  * scsi_pkt handling
1112  *
1113  * Visible to the external world via the transport structure.
1114  */
1115 
1116 int
1117 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1118 {
1119 	struct mrsas_instance	*instance = ADDR2MR(ap);
1120 	struct scsa_cmd		*acmd = PKT2CMD(pkt);
1121 	struct mrsas_cmd	*cmd = NULL;
1122 	uchar_t			cmd_done = 0;
1123 
1124 	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1125 	if (instance->deadadapter == 1) {
1126 		dev_err(instance->dip, CE_WARN,
1127 		    "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1128 		    "for IO, as the HBA doesnt take any more IOs");
1129 		if (pkt) {
1130 			pkt->pkt_reason		= CMD_DEV_GONE;
1131 			pkt->pkt_statistics	= STAT_DISCON;
1132 		}
1133 		return (TRAN_FATAL_ERROR);
1134 	}
1135 	if (instance->adapterresetinprogress) {
1136 		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1137 		    "returning mfi_pkt and setting TRAN_BUSY\n"));
1138 		return (TRAN_BUSY);
1139 	}
1140 	(void) mrsas_tbolt_prepare_pkt(acmd);
1141 
1142 	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1143 
1144 	/*
1145 	 * Check if the command is already completed by the mrsas_build_cmd()
1146 	 * routine. In which case the busy_flag would be clear and scb will be
1147 	 * NULL and appropriate reason provided in pkt_reason field
1148 	 */
1149 	if (cmd_done) {
1150 		pkt->pkt_reason = CMD_CMPLT;
1151 		pkt->pkt_scbp[0] = STATUS_GOOD;
1152 		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1153 		    | STATE_SENT_CMD;
1154 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1155 			(*pkt->pkt_comp)(pkt);
1156 		}
1157 
1158 		return (TRAN_ACCEPT);
1159 	}
1160 
1161 	if (cmd == NULL) {
1162 		return (TRAN_BUSY);
1163 	}
1164 
1165 
1166 	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1167 		if (instance->fw_outstanding > instance->max_fw_cmds) {
1168 			dev_err(instance->dip, CE_WARN,
1169 			    "Command Queue Full... Returning BUSY");
1170 			DTRACE_PROBE2(tbolt_start_tran_err,
1171 			    uint16_t, instance->fw_outstanding,
1172 			    uint16_t, instance->max_fw_cmds);
1173 			return_raid_msg_pkt(instance, cmd);
1174 			return (TRAN_BUSY);
1175 		}
1176 
1177 		/* Synchronize the Cmd frame for the controller */
1178 		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1179 		    DDI_DMA_SYNC_FORDEV);
1180 
1181 		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1182 		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1183 		    cmd->index, cmd->SMID));
1184 
1185 		instance->func_ptr->issue_cmd(cmd, instance);
1186 	} else {
1187 		instance->func_ptr->issue_cmd(cmd, instance);
1188 		(void) wait_for_outstanding_poll_io(instance);
1189 		(void) mrsas_common_check(instance, cmd);
1190 		DTRACE_PROBE2(tbolt_start_nointr_done,
1191 		    uint8_t, cmd->frame->hdr.cmd,
1192 		    uint8_t, cmd->frame->hdr.cmd_status);
1193 	}
1194 
1195 	return (TRAN_ACCEPT);
1196 }
1197 
1198 /*
1199  * prepare the pkt:
1200  * the pkt may have been resubmitted or just reused so
1201  * initialize some fields and do some checks.
1202  */
1203 static int
1204 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1205 {
1206 	struct scsi_pkt	*pkt = CMD2PKT(acmd);
1207 
1208 
1209 	/*
1210 	 * Reinitialize some fields that need it; the packet may
1211 	 * have been resubmitted
1212 	 */
1213 	pkt->pkt_reason = CMD_CMPLT;
1214 	pkt->pkt_state = 0;
1215 	pkt->pkt_statistics = 0;
1216 	pkt->pkt_resid = 0;
1217 
1218 	/*
1219 	 * zero status byte.
1220 	 */
1221 	*(pkt->pkt_scbp) = 0;
1222 
1223 	return (0);
1224 }
1225 
1226 
1227 int
1228 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1229     struct scsa_cmd *acmd,
1230     struct mrsas_cmd *cmd,
1231     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1232     uint32_t *datalen)
1233 {
1234 	uint32_t		MaxSGEs;
1235 	int			sg_to_process;
1236 	uint32_t		i, j;
1237 	uint32_t		numElements, endElement;
1238 	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
1239 	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
1240 	ddi_acc_handle_t acc_handle =
1241 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1242 
1243 	con_log(CL_ANN1, (CE_NOTE,
1244 	    "chkpnt: Building Chained SGL :%d", __LINE__));
1245 
1246 	/* Calulate SGE size in number of Words(32bit) */
1247 	/* Clear the datalen before updating it. */
1248 	*datalen = 0;
1249 
1250 	MaxSGEs = instance->max_sge_in_main_msg;
1251 
1252 	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1253 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1254 
1255 	/* set data transfer flag. */
1256 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1257 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1258 		    MPI2_SCSIIO_CONTROL_WRITE);
1259 	} else {
1260 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1261 		    MPI2_SCSIIO_CONTROL_READ);
1262 	}
1263 
1264 
1265 	numElements = acmd->cmd_cookiecnt;
1266 
1267 	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1268 
1269 	if (numElements > instance->max_num_sge) {
1270 		con_log(CL_ANN, (CE_NOTE,
1271 		    "[Max SGE Count Exceeded]:%x", numElements));
1272 		return (numElements);
1273 	}
1274 
1275 	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1276 	    (uint8_t)numElements);
1277 
1278 	/* set end element in main message frame */
1279 	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1280 
1281 	/* prepare the scatter-gather list for the firmware */
1282 	scsi_raid_io_sgl_ieee =
1283 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1284 
1285 	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1286 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1287 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1288 
1289 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1290 	}
1291 
1292 	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1293 		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1294 		    acmd->cmd_dmacookies[i].dmac_laddress);
1295 
1296 		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1297 		    acmd->cmd_dmacookies[i].dmac_size);
1298 
1299 		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1300 
1301 		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1302 			if (i == (numElements - 1)) {
1303 				ddi_put8(acc_handle,
1304 				    &scsi_raid_io_sgl_ieee->Flags,
1305 				    IEEE_SGE_FLAGS_END_OF_LIST);
1306 			}
1307 		}
1308 
1309 		*datalen += acmd->cmd_dmacookies[i].dmac_size;
1310 
1311 #ifdef DEBUG
1312 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1313 		    scsi_raid_io_sgl_ieee->Address));
1314 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1315 		    scsi_raid_io_sgl_ieee->Length));
1316 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1317 		    scsi_raid_io_sgl_ieee->Flags));
1318 #endif
1319 
1320 	}
1321 
1322 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1323 
1324 	/* check if chained SGL required */
1325 	if (i < numElements) {
1326 
1327 		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1328 
1329 		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1330 			uint16_t ioFlags =
1331 			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1332 
1333 			if ((ioFlags &
1334 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1335 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1336 				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1337 				    (U8)instance->chain_offset_io_req);
1338 			} else {
1339 				ddi_put8(acc_handle,
1340 				    &scsi_raid_io->ChainOffset, 0);
1341 			}
1342 		} else {
1343 			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1344 			    (U8)instance->chain_offset_io_req);
1345 		}
1346 
1347 		/* prepare physical chain element */
1348 		ieeeChainElement = scsi_raid_io_sgl_ieee;
1349 
1350 		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1351 
1352 		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1353 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1354 			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1355 		} else {
1356 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1357 			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1358 			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1359 		}
1360 
1361 		ddi_put32(acc_handle, &ieeeChainElement->Length,
1362 		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1363 
1364 		ddi_put64(acc_handle, &ieeeChainElement->Address,
1365 		    (U64)cmd->sgl_phys_addr);
1366 
1367 		sg_to_process = numElements - i;
1368 
1369 		con_log(CL_ANN1, (CE_NOTE,
1370 		    "[Additional SGE Count]:%x", endElement));
1371 
1372 		/* point to the chained SGL buffer */
1373 		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1374 
1375 		/* build rest of the SGL in chained buffer */
1376 		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1377 			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1378 
1379 			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1380 			    acmd->cmd_dmacookies[i].dmac_laddress);
1381 
1382 			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1383 			    acmd->cmd_dmacookies[i].dmac_size);
1384 
1385 			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1386 
1387 			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1388 				if (i == (numElements - 1)) {
1389 					ddi_put8(acc_handle,
1390 					    &scsi_raid_io_sgl_ieee->Flags,
1391 					    IEEE_SGE_FLAGS_END_OF_LIST);
1392 				}
1393 			}
1394 
1395 			*datalen += acmd->cmd_dmacookies[i].dmac_size;
1396 
1397 #if DEBUG
1398 			con_log(CL_DLEVEL1, (CE_NOTE,
1399 			    "[SGL Address]: %" PRIx64,
1400 			    scsi_raid_io_sgl_ieee->Address));
1401 			con_log(CL_DLEVEL1, (CE_NOTE,
1402 			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1403 			con_log(CL_DLEVEL1, (CE_NOTE,
1404 			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1405 #endif
1406 
1407 			i++;
1408 		}
1409 	}
1410 
1411 	return (0);
1412 } /*end of BuildScatterGather */
1413 
1414 
1415 /*
1416  * build_cmd
1417  */
1418 static struct mrsas_cmd *
1419 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1420     struct scsi_pkt *pkt, uchar_t *cmd_done)
1421 {
1422 	uint8_t		fp_possible = 0;
1423 	uint32_t	index;
1424 	uint32_t	lba_count = 0;
1425 	uint32_t	start_lba_hi = 0;
1426 	uint32_t	start_lba_lo = 0;
1427 	ddi_acc_handle_t acc_handle =
1428 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1429 	struct mrsas_cmd		*cmd = NULL;
1430 	struct scsa_cmd			*acmd = PKT2CMD(pkt);
1431 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
1432 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
1433 	uint32_t			datalen;
1434 	struct IO_REQUEST_INFO io_info;
1435 	MR_FW_RAID_MAP_ALL *local_map_ptr;
1436 	uint16_t pd_cmd_cdblen;
1437 
1438 	con_log(CL_DLEVEL1, (CE_NOTE,
1439 	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1440 
1441 	/* find out if this is logical or physical drive command.  */
1442 	acmd->islogical = MRDRV_IS_LOGICAL(ap);
1443 	acmd->device_id = MAP_DEVICE_ID(instance, ap);
1444 
1445 	*cmd_done = 0;
1446 
1447 	/* get the command packet */
1448 	if (!(cmd = get_raid_msg_pkt(instance))) {
1449 		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
1450 		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
1451 		return (NULL);
1452 	}
1453 
1454 	index = cmd->index;
1455 	ReqDescUnion =	mr_sas_get_request_descriptor(instance, index);
1456 	ReqDescUnion->Words = 0;
1457 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1458 	ReqDescUnion->SCSIIO.RequestFlags =
1459 	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1460 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1461 
1462 
1463 	cmd->request_desc = ReqDescUnion;
1464 	cmd->pkt = pkt;
1465 	cmd->cmd = acmd;
1466 
1467 	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
1468 	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
1469 	    uint16_t, acmd->device_id);
1470 
1471 	/* lets get the command directions */
1472 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1473 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1474 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1475 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1476 			    DDI_DMA_SYNC_FORDEV);
1477 		}
1478 	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1479 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1480 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1481 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1482 			    DDI_DMA_SYNC_FORCPU);
1483 		}
1484 	} else {
1485 		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1486 	}
1487 
1488 
1489 	/* get SCSI_IO raid message frame pointer */
1490 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1491 
1492 	/* zero out SCSI_IO raid message frame */
1493 	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1494 
1495 	/* Set the ldTargetId set by BuildRaidContext() */
1496 	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1497 	    acmd->device_id);
1498 
1499 	/*  Copy CDB to scsi_io_request message frame */
1500 	ddi_rep_put8(acc_handle,
1501 	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1502 	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1503 
1504 	/*
1505 	 * Just the CDB length, rest of the Flags are zero
1506 	 * This will be modified later.
1507 	 */
1508 	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1509 
1510 	pd_cmd_cdblen = acmd->cmd_cdblen;
1511 
1512 	if (acmd->islogical) {
1513 
1514 		switch (pkt->pkt_cdbp[0]) {
1515 		case SCMD_READ:
1516 		case SCMD_WRITE:
1517 		case SCMD_READ_G1:
1518 		case SCMD_WRITE_G1:
1519 		case SCMD_READ_G4:
1520 		case SCMD_WRITE_G4:
1521 		case SCMD_READ_G5:
1522 		case SCMD_WRITE_G5:
1523 
1524 			/* Initialize sense Information */
1525 			if (cmd->sense1 == NULL) {
1526 				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1527 				    "Sense buffer ptr NULL "));
1528 			}
1529 			bzero(cmd->sense1, SENSE_LENGTH);
1530 			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1531 			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1532 
1533 			if (acmd->cmd_cdblen == CDB_GROUP0) {
1534 				/* 6-byte cdb */
1535 				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1536 				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1537 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1538 				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1539 				    << 16));
1540 			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
1541 				/* 10-byte cdb */
1542 				lba_count =
1543 				    (((uint16_t)(pkt->pkt_cdbp[8])) |
1544 				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1545 
1546 				start_lba_lo =
1547 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1548 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1549 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1550 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1551 
1552 			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
1553 				/* 12-byte cdb */
1554 				lba_count = (
1555 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1556 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1557 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1558 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1559 
1560 				start_lba_lo =
1561 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1562 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1563 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1564 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1565 
1566 			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
1567 				/* 16-byte cdb */
1568 				lba_count = (
1569 				    ((uint32_t)(pkt->pkt_cdbp[13])) |
1570 				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1571 				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1572 				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1573 
1574 				start_lba_lo = (
1575 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1576 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1577 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1578 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1579 
1580 				start_lba_hi = (
1581 				    ((uint32_t)(pkt->pkt_cdbp[5])) |
1582 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1583 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1584 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1585 			}
1586 
1587 			if (instance->tbolt &&
1588 			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1589 				dev_err(instance->dip, CE_WARN,
1590 				    "IO SECTOR COUNT exceeds "
1591 				    "controller limit 0x%x sectors",
1592 				    lba_count);
1593 			}
1594 
1595 			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1596 			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1597 			    start_lba_lo;
1598 			io_info.numBlocks = lba_count;
1599 			io_info.ldTgtId = acmd->device_id;
1600 
1601 			if (acmd->cmd_flags & CFLAG_DMASEND)
1602 				io_info.isRead = 0;
1603 			else
1604 				io_info.isRead = 1;
1605 
1606 
1607 			/* Acquire SYNC MAP UPDATE lock */
1608 			mutex_enter(&instance->sync_map_mtx);
1609 
1610 			local_map_ptr =
1611 			    instance->ld_map[(instance->map_id & 1)];
1612 
1613 			if ((MR_TargetIdToLdGet(
1614 			    acmd->device_id, local_map_ptr) >=
1615 			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1616 				dev_err(instance->dip, CE_NOTE,
1617 				    "Fast Path NOT Possible, "
1618 				    "targetId >= MAX_LOGICAL_DRIVES || "
1619 				    "!instance->fast_path_io");
1620 				fp_possible = 0;
1621 				/* Set Regionlock flags to BYPASS */
1622 				/* io_request->RaidContext.regLockFlags  = 0; */
1623 				ddi_put8(acc_handle,
1624 				    &scsi_raid_io->RaidContext.regLockFlags, 0);
1625 			} else {
1626 				if (MR_BuildRaidContext(instance, &io_info,
1627 				    &scsi_raid_io->RaidContext, local_map_ptr))
1628 					fp_possible = io_info.fpOkForIo;
1629 			}
1630 
1631 			if (!enable_fp)
1632 				fp_possible = 0;
1633 
1634 			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
1635 			    "instance->fast_path_io %d fp_possible %d",
1636 			    enable_fp, instance->fast_path_io, fp_possible));
1637 
1638 		if (fp_possible) {
1639 
1640 			/* Check for DIF enabled LD */
1641 			if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1642 				/* Prepare 32 Byte CDB for DIF capable Disk */
1643 				mrsas_tbolt_prepare_cdb(instance,
1644 				    scsi_raid_io->CDB.CDB32,
1645 				    &io_info, scsi_raid_io, start_lba_lo);
1646 			} else {
1647 				mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1648 				    (uint8_t *)&pd_cmd_cdblen,
1649 				    io_info.pdBlock, io_info.numBlocks);
1650 				ddi_put16(acc_handle,
1651 				    &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1652 			}
1653 
1654 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1655 			    MPI2_FUNCTION_SCSI_IO_REQUEST);
1656 
1657 			ReqDescUnion->SCSIIO.RequestFlags =
1658 			    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1659 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1660 
1661 			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1662 				uint8_t regLockFlags = ddi_get8(acc_handle,
1663 				    &scsi_raid_io->RaidContext.regLockFlags);
1664 				uint16_t IoFlags = ddi_get16(acc_handle,
1665 				    &scsi_raid_io->IoFlags);
1666 
1667 				if (regLockFlags == REGION_TYPE_UNUSED)
1668 					ReqDescUnion->SCSIIO.RequestFlags =
1669 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1670 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1671 
1672 				IoFlags |=
1673 				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1674 				regLockFlags |=
1675 				    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1676 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1677 
1678 				ddi_put8(acc_handle,
1679 				    &scsi_raid_io->ChainOffset, 0);
1680 				ddi_put8(acc_handle,
1681 				    &scsi_raid_io->RaidContext.nsegType,
1682 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1683 				    MPI2_TYPE_CUDA));
1684 				ddi_put8(acc_handle,
1685 				    &scsi_raid_io->RaidContext.regLockFlags,
1686 				    regLockFlags);
1687 				ddi_put16(acc_handle,
1688 				    &scsi_raid_io->IoFlags, IoFlags);
1689 			}
1690 
1691 			if ((instance->load_balance_info[
1692 			    acmd->device_id].loadBalanceFlag) &&
1693 			    (io_info.isRead)) {
1694 				io_info.devHandle =
1695 				    get_updated_dev_handle(&instance->
1696 				    load_balance_info[acmd->device_id],
1697 				    &io_info);
1698 				cmd->load_balance_flag |=
1699 				    MEGASAS_LOAD_BALANCE_FLAG;
1700 			} else {
1701 				cmd->load_balance_flag &=
1702 				    ~MEGASAS_LOAD_BALANCE_FLAG;
1703 			}
1704 
1705 			ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1706 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1707 			    io_info.devHandle);
1708 
1709 		} else { /* FP Not Possible */
1710 
1711 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1712 			    MPI2_FUNCTION_LD_IO_REQUEST);
1713 
1714 			ddi_put16(acc_handle,
1715 			    &scsi_raid_io->DevHandle, acmd->device_id);
1716 
1717 			ReqDescUnion->SCSIIO.RequestFlags =
1718 			    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1719 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1720 
1721 			ddi_put16(acc_handle,
1722 			    &scsi_raid_io->RaidContext.timeoutValue,
1723 			    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1724 
1725 			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1726 				uint8_t regLockFlags = ddi_get8(acc_handle,
1727 				    &scsi_raid_io->RaidContext.regLockFlags);
1728 
1729 				if (regLockFlags == REGION_TYPE_UNUSED) {
1730 					ReqDescUnion->SCSIIO.RequestFlags =
1731 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1732 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1733 				}
1734 
1735 				regLockFlags |=
1736 				    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1737 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1738 
1739 				ddi_put8(acc_handle,
1740 				    &scsi_raid_io->RaidContext.nsegType,
1741 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1742 				    MPI2_TYPE_CUDA));
1743 				ddi_put8(acc_handle,
1744 				    &scsi_raid_io->RaidContext.regLockFlags,
1745 				    regLockFlags);
1746 			}
1747 		} /* Not FP */
1748 
1749 		/* Release SYNC MAP UPDATE lock */
1750 		mutex_exit(&instance->sync_map_mtx);
1751 
1752 		break;
1753 
1754 		case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1755 			return_raid_msg_pkt(instance, cmd);
1756 			*cmd_done = 1;
1757 			return (NULL);
1758 		}
1759 
1760 		case SCMD_MODE_SENSE:
1761 		case SCMD_MODE_SENSE_G1: {
1762 			union scsi_cdb	*cdbp;
1763 			uint16_t	page_code;
1764 
1765 			cdbp = (void *)pkt->pkt_cdbp;
1766 			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1767 			switch (page_code) {
1768 			case 0x3:
1769 			case 0x4:
1770 				(void) mrsas_mode_sense_build(pkt);
1771 				return_raid_msg_pkt(instance, cmd);
1772 				*cmd_done = 1;
1773 				return (NULL);
1774 			}
1775 			return (cmd);
1776 		}
1777 
1778 		default:
1779 			/* Pass-through command to logical drive */
1780 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1781 			    MPI2_FUNCTION_LD_IO_REQUEST);
1782 			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1783 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1784 			    acmd->device_id);
1785 			ReqDescUnion->SCSIIO.RequestFlags =
1786 			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1787 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1788 			break;
1789 		}
1790 	} else { /* Physical */
1791 #ifdef PDSUPPORT
1792 		/* Pass-through command to physical drive */
1793 
1794 		/* Acquire SYNC MAP UPDATE lock */
1795 		mutex_enter(&instance->sync_map_mtx);
1796 
1797 		local_map_ptr = instance->ld_map[instance->map_id & 1];
1798 
1799 		ddi_put8(acc_handle, &scsi_raid_io->Function,
1800 		    MPI2_FUNCTION_SCSI_IO_REQUEST);
1801 
1802 		ReqDescUnion->SCSIIO.RequestFlags =
1803 		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1804 		    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1805 
1806 		ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1807 		    local_map_ptr->raidMap.
1808 		    devHndlInfo[acmd->device_id].curDevHdl);
1809 
1810 		/* Set regLockFlasgs to REGION_TYPE_BYPASS */
1811 		ddi_put8(acc_handle,
1812 		    &scsi_raid_io->RaidContext.regLockFlags, 0);
1813 		ddi_put64(acc_handle,
1814 		    &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1815 		ddi_put32(acc_handle,
1816 		    &scsi_raid_io->RaidContext.regLockLength, 0);
1817 		ddi_put8(acc_handle,
1818 		    &scsi_raid_io->RaidContext.RAIDFlags,
1819 		    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1820 		    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1821 		ddi_put16(acc_handle,
1822 		    &scsi_raid_io->RaidContext.timeoutValue,
1823 		    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1824 		ddi_put16(acc_handle,
1825 		    &scsi_raid_io->RaidContext.ldTargetId,
1826 		    acmd->device_id);
1827 		ddi_put8(acc_handle,
1828 		    &scsi_raid_io->LUN[1], acmd->lun);
1829 
1830 		if (instance->fast_path_io &&
1831 		    instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1832 			uint16_t IoFlags = ddi_get16(acc_handle,
1833 			    &scsi_raid_io->IoFlags);
1834 			IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1835 			ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1836 		}
1837 		ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1838 		    local_map_ptr->raidMap.
1839 		    devHndlInfo[acmd->device_id].curDevHdl);
1840 
1841 		/* Release SYNC MAP UPDATE lock */
1842 		mutex_exit(&instance->sync_map_mtx);
1843 #else
1844 		/* If no PD support, return here. */
1845 		return (cmd);
1846 #endif
1847 	}
1848 
1849 	/* Set sense buffer physical address/length in scsi_io_request. */
1850 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1851 	    cmd->sense_phys_addr1);
1852 	ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1853 
1854 	/* Construct SGL */
1855 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1856 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1857 
1858 	(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1859 	    scsi_raid_io, &datalen);
1860 
1861 	ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1862 
1863 	con_log(CL_ANN, (CE_CONT,
1864 	    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1865 	    pkt->pkt_cdbp[0], acmd->device_id));
1866 	con_log(CL_DLEVEL1, (CE_CONT,
1867 	    "data length = %x\n",
1868 	    scsi_raid_io->DataLength));
1869 	con_log(CL_DLEVEL1, (CE_CONT,
1870 	    "cdb length = %x\n",
1871 	    acmd->cmd_cdblen));
1872 
1873 	return (cmd);
1874 }
1875 
1876 uint32_t
1877 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1878 {
1879 	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1880 }
1881 
1882 void
1883 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1884 {
1885 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1886 	atomic_inc_16(&instance->fw_outstanding);
1887 
1888 	struct scsi_pkt *pkt;
1889 
1890 	con_log(CL_ANN1,
1891 	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1892 
1893 	con_log(CL_DLEVEL1, (CE_CONT,
1894 	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1895 	con_log(CL_DLEVEL1, (CE_CONT,
1896 	    " [req desc low part] %x \n",
1897 	    (uint_t)(req_desc->Words & 0xffffffffff)));
1898 	con_log(CL_DLEVEL1, (CE_CONT,
1899 	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1900 	pkt = cmd->pkt;
1901 
1902 	if (pkt) {
1903 		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1904 		    "ISSUED CMD TO FW : called : cmd:"
1905 		    ": %p instance : %p pkt : %p pkt_time : %x\n",
1906 		    gethrtime(), (void *)cmd, (void *)instance,
1907 		    (void *)pkt, cmd->drv_pkt_time));
1908 		if (instance->adapterresetinprogress) {
1909 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1910 			con_log(CL_ANN, (CE_NOTE,
1911 			    "TBOLT Reset the scsi_pkt timer"));
1912 		} else {
1913 			push_pending_mfi_pkt(instance, cmd);
1914 		}
1915 
1916 	} else {
1917 		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1918 		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1919 		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1920 	}
1921 
1922 	/* Issue the command to the FW */
1923 	mutex_enter(&instance->reg_write_mtx);
1924 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1925 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1926 	mutex_exit(&instance->reg_write_mtx);
1927 }
1928 
1929 /*
1930  * issue_cmd_in_sync_mode
1931  */
1932 int
1933 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1934     struct mrsas_cmd *cmd)
1935 {
1936 	int		i;
1937 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1938 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1939 
1940 	struct mrsas_header	*hdr;
1941 	hdr = (struct mrsas_header *)&cmd->frame->hdr;
1942 
1943 	con_log(CL_ANN,
1944 	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1945 	    cmd->SMID));
1946 
1947 
1948 	if (instance->adapterresetinprogress) {
1949 		cmd->drv_pkt_time = ddi_get16
1950 		    (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1951 		if (cmd->drv_pkt_time < debug_timeout_g)
1952 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1953 		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1954 		    "RESET-IN-PROGRESS, issue cmd & return."));
1955 
1956 		mutex_enter(&instance->reg_write_mtx);
1957 		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1958 		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1959 		mutex_exit(&instance->reg_write_mtx);
1960 
1961 		return (DDI_SUCCESS);
1962 	} else {
1963 		con_log(CL_ANN1, (CE_NOTE,
1964 		    "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1965 		push_pending_mfi_pkt(instance, cmd);
1966 	}
1967 
1968 	con_log(CL_DLEVEL2, (CE_NOTE,
1969 	    "HighQport offset :%p",
1970 	    (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1971 	con_log(CL_DLEVEL2, (CE_NOTE,
1972 	    "LowQport offset :%p",
1973 	    (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1974 
1975 	cmd->sync_cmd = MRSAS_TRUE;
1976 	cmd->cmd_status =  ENODATA;
1977 
1978 
1979 	mutex_enter(&instance->reg_write_mtx);
1980 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1981 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1982 	mutex_exit(&instance->reg_write_mtx);
1983 
1984 	con_log(CL_ANN1, (CE_NOTE,
1985 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1986 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1987 	    (uint_t)(req_desc->Words & 0xffffffff)));
1988 
1989 	mutex_enter(&instance->int_cmd_mtx);
1990 	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
1991 		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
1992 	}
1993 	mutex_exit(&instance->int_cmd_mtx);
1994 
1995 
1996 	if (i < (msecs -1)) {
1997 		return (DDI_SUCCESS);
1998 	} else {
1999 		return (DDI_FAILURE);
2000 	}
2001 }
2002 
2003 /*
2004  * issue_cmd_in_poll_mode
2005  */
2006 int
2007 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2008     struct mrsas_cmd *cmd)
2009 {
2010 	int		i;
2011 	uint16_t	flags;
2012 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2013 	struct mrsas_header *frame_hdr;
2014 
2015 	con_log(CL_ANN,
2016 	    (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2017 	    cmd->SMID));
2018 
2019 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2020 
2021 	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2022 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2023 	    MFI_CMD_STATUS_POLL_MODE);
2024 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2025 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2026 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2027 
2028 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2029 	    (uint_t)(req_desc->Words & 0xffffffff)));
2030 	con_log(CL_ANN1, (CE_NOTE,
2031 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2032 
2033 	/* issue the frame using inbound queue port */
2034 	mutex_enter(&instance->reg_write_mtx);
2035 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2036 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2037 	mutex_exit(&instance->reg_write_mtx);
2038 
2039 	for (i = 0; i < msecs && (
2040 	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2041 	    == MFI_CMD_STATUS_POLL_MODE); i++) {
2042 		/* wait for cmd_status to change from 0xFF */
2043 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2044 	}
2045 
2046 	if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2047 	    &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2048 		con_log(CL_ANN1, (CE_NOTE,
2049 		    " cmd failed %" PRIx64, (req_desc->Words)));
2050 		return (DDI_FAILURE);
2051 	}
2052 
2053 	return (DDI_SUCCESS);
2054 }
2055 
2056 void
2057 tbolt_enable_intr(struct mrsas_instance *instance)
2058 {
2059 	/* TODO: For Thunderbolt/Invader also clear intr on enable */
2060 	/* writel(~0, &regs->outbound_intr_status); */
2061 	/* readl(&regs->outbound_intr_status); */
2062 
2063 	WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2064 
2065 	/* dummy read to force PCI flush */
2066 	(void) RD_OB_INTR_MASK(instance);
2067 
2068 }
2069 
2070 void
2071 tbolt_disable_intr(struct mrsas_instance *instance)
2072 {
2073 	uint32_t mask = 0xFFFFFFFF;
2074 
2075 	WR_OB_INTR_MASK(mask, instance);
2076 
2077 	/* Dummy readl to force pci flush */
2078 
2079 	(void) RD_OB_INTR_MASK(instance);
2080 }
2081 
2082 
2083 int
2084 tbolt_intr_ack(struct mrsas_instance *instance)
2085 {
2086 	uint32_t	status;
2087 
2088 	/* check if it is our interrupt */
2089 	status = RD_OB_INTR_STATUS(instance);
2090 	con_log(CL_ANN1, (CE_NOTE,
2091 	    "chkpnt: Entered tbolt_intr_ack status = %d", status));
2092 
2093 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2094 		return (DDI_INTR_UNCLAIMED);
2095 	}
2096 
2097 	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2098 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2099 		return (DDI_INTR_UNCLAIMED);
2100 	}
2101 
2102 	if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2103 		/* clear the interrupt by writing back the same value */
2104 		WR_OB_INTR_STATUS(status, instance);
2105 		/* dummy READ */
2106 		(void) RD_OB_INTR_STATUS(instance);
2107 	}
2108 	return (DDI_INTR_CLAIMED);
2109 }
2110 
2111 /*
2112  * get_raid_msg_pkt : Get a command from the free pool
2113  * After successful allocation, the caller of this routine
2114  * must clear the frame buffer (memset to zero) before
2115  * using the packet further.
2116  *
2117  * ***** Note *****
2118  * After clearing the frame buffer the context id of the
2119  * frame buffer SHOULD be restored back.
2120  */
2121 
2122 struct mrsas_cmd *
2123 get_raid_msg_pkt(struct mrsas_instance *instance)
2124 {
2125 	mlist_t			*head = &instance->cmd_pool_list;
2126 	struct mrsas_cmd	*cmd = NULL;
2127 
2128 	mutex_enter(&instance->cmd_pool_mtx);
2129 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2130 
2131 
2132 	if (!mlist_empty(head)) {
2133 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2134 		mlist_del_init(head->next);
2135 	}
2136 	if (cmd != NULL) {
2137 		cmd->pkt = NULL;
2138 		cmd->retry_count_for_ocr = 0;
2139 		cmd->drv_pkt_time = 0;
2140 	}
2141 	mutex_exit(&instance->cmd_pool_mtx);
2142 
2143 	if (cmd != NULL)
2144 		bzero(cmd->scsi_io_request,
2145 		    sizeof (Mpi2RaidSCSIIORequest_t));
2146 	return (cmd);
2147 }
2148 
2149 struct mrsas_cmd *
2150 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2151 {
2152 	mlist_t			*head = &instance->cmd_app_pool_list;
2153 	struct mrsas_cmd	*cmd = NULL;
2154 
2155 	mutex_enter(&instance->cmd_app_pool_mtx);
2156 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2157 
2158 	if (!mlist_empty(head)) {
2159 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2160 		mlist_del_init(head->next);
2161 	}
2162 	if (cmd != NULL) {
2163 		cmd->retry_count_for_ocr = 0;
2164 		cmd->drv_pkt_time = 0;
2165 		cmd->pkt = NULL;
2166 		cmd->request_desc = NULL;
2167 
2168 	}
2169 
2170 	mutex_exit(&instance->cmd_app_pool_mtx);
2171 
2172 	if (cmd != NULL) {
2173 		bzero(cmd->scsi_io_request,
2174 		    sizeof (Mpi2RaidSCSIIORequest_t));
2175 	}
2176 
2177 	return (cmd);
2178 }
2179 
2180 /*
2181  * return_raid_msg_pkt : Return a cmd to free command pool
2182  */
2183 void
2184 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2185 {
2186 	mutex_enter(&instance->cmd_pool_mtx);
2187 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2188 
2189 
2190 	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2191 
2192 	mutex_exit(&instance->cmd_pool_mtx);
2193 }
2194 
2195 void
2196 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2197 {
2198 	mutex_enter(&instance->cmd_app_pool_mtx);
2199 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2200 
2201 	mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2202 
2203 	mutex_exit(&instance->cmd_app_pool_mtx);
2204 }
2205 
2206 
2207 void
2208 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2209     struct mrsas_cmd *cmd)
2210 {
2211 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2212 	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
2213 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
2214 	uint32_t			index;
2215 	ddi_acc_handle_t acc_handle =
2216 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2217 
2218 	if (!instance->tbolt) {
2219 		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2220 		return;
2221 	}
2222 
2223 	index = cmd->index;
2224 
2225 	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2226 
2227 	if (!ReqDescUnion) {
2228 		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2229 		return;
2230 	}
2231 
2232 	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2233 
2234 	ReqDescUnion->Words = 0;
2235 
2236 	ReqDescUnion->SCSIIO.RequestFlags =
2237 	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2238 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2239 
2240 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2241 
2242 	cmd->request_desc = ReqDescUnion;
2243 
2244 	/* get raid message frame pointer */
2245 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2246 
2247 	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2248 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2249 		    &scsi_raid_io->SGL.IeeeChain;
2250 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2251 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2252 	}
2253 
2254 	ddi_put8(acc_handle, &scsi_raid_io->Function,
2255 	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2256 
2257 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2258 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2259 
2260 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2261 	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2262 
2263 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2264 	    cmd->sense_phys_addr1);
2265 
2266 
2267 	scsi_raid_io_sgl_ieee =
2268 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2269 
2270 	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2271 	    (U64)cmd->frame_phys_addr);
2272 
2273 	ddi_put8(acc_handle,
2274 	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2275 	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2276 	/* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2277 	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2278 
2279 	con_log(CL_ANN1, (CE_NOTE,
2280 	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
2281 	    scsi_raid_io_sgl_ieee->Address));
2282 	con_log(CL_ANN1, (CE_NOTE,
2283 	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2284 	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2285 	    scsi_raid_io_sgl_ieee->Flags));
2286 }
2287 
2288 
2289 void
2290 tbolt_complete_cmd(struct mrsas_instance *instance,
2291     struct mrsas_cmd *cmd)
2292 {
2293 	uint8_t				status;
2294 	uint8_t				extStatus;
2295 	uint8_t				function;
2296 	uint8_t				arm;
2297 	struct scsa_cmd			*acmd;
2298 	struct scsi_pkt			*pkt;
2299 	struct scsi_arq_status		*arqstat;
2300 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2301 	LD_LOAD_BALANCE_INFO		*lbinfo;
2302 	ddi_acc_handle_t acc_handle =
2303 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2304 
2305 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2306 
2307 	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2308 	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2309 
2310 	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2311 	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2312 
2313 	if (status != MFI_STAT_OK) {
2314 		con_log(CL_ANN, (CE_WARN,
2315 		    "IO Cmd Failed SMID %x", cmd->SMID));
2316 	} else {
2317 		con_log(CL_ANN, (CE_NOTE,
2318 		    "IO Cmd Success  SMID %x", cmd->SMID));
2319 	}
2320 
2321 	/* regular commands */
2322 
2323 	function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2324 	DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2325 	    uint8_t, status, uint8_t, extStatus);
2326 
2327 	switch (function) {
2328 
2329 	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
2330 		acmd =	(struct scsa_cmd *)cmd->cmd;
2331 		lbinfo = &instance->load_balance_info[acmd->device_id];
2332 
2333 		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2334 			arm = lbinfo->raid1DevHandle[0] ==
2335 			    scsi_raid_io->DevHandle ? 0 : 1;
2336 
2337 			lbinfo->scsi_pending_cmds[arm]--;
2338 			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2339 		}
2340 		con_log(CL_DLEVEL3, (CE_NOTE,
2341 		    "FastPath IO Completion Success "));
2342 		/* FALLTHRU */
2343 
2344 	case MPI2_FUNCTION_LD_IO_REQUEST :   { /* Regular Path IO. */
2345 		acmd =	(struct scsa_cmd *)cmd->cmd;
2346 		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);
2347 
2348 		if (acmd->cmd_flags & CFLAG_DMAVALID) {
2349 			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2350 				(void) ddi_dma_sync(acmd->cmd_dmahandle,
2351 				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
2352 				    DDI_DMA_SYNC_FORCPU);
2353 			}
2354 		}
2355 
2356 		pkt->pkt_reason		= CMD_CMPLT;
2357 		pkt->pkt_statistics	= 0;
2358 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2359 		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2360 
2361 		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2362 		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2363 		    ((acmd->islogical) ? "LD" : "PD"),
2364 		    acmd->cmd_dmacount, cmd->SMID, status));
2365 
2366 		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2367 			struct scsi_inquiry	*inq;
2368 
2369 			if (acmd->cmd_dmacount != 0) {
2370 				bp_mapin(acmd->cmd_buf);
2371 				inq = (struct scsi_inquiry *)
2372 				    acmd->cmd_buf->b_un.b_addr;
2373 
2374 				/* don't expose physical drives to OS */
2375 				if (acmd->islogical &&
2376 				    (status == MFI_STAT_OK)) {
2377 					display_scsi_inquiry((caddr_t)inq);
2378 #ifdef PDSUPPORT
2379 				} else if ((status == MFI_STAT_OK) &&
2380 				    inq->inq_dtype == DTYPE_DIRECT) {
2381 					display_scsi_inquiry((caddr_t)inq);
2382 #endif
2383 				} else {
2384 					/* for physical disk */
2385 					status = MFI_STAT_DEVICE_NOT_FOUND;
2386 				}
2387 			}
2388 		}
2389 
2390 		switch (status) {
2391 		case MFI_STAT_OK:
2392 			pkt->pkt_scbp[0] = STATUS_GOOD;
2393 			break;
2394 		case MFI_STAT_LD_CC_IN_PROGRESS:
2395 		case MFI_STAT_LD_RECON_IN_PROGRESS:
2396 			pkt->pkt_scbp[0] = STATUS_GOOD;
2397 			break;
2398 		case MFI_STAT_LD_INIT_IN_PROGRESS:
2399 			pkt->pkt_reason	= CMD_TRAN_ERR;
2400 			break;
2401 		case MFI_STAT_SCSI_IO_FAILED:
2402 			dev_err(instance->dip, CE_WARN,
2403 			    "tbolt_complete_cmd: scsi_io failed");
2404 			pkt->pkt_reason	= CMD_TRAN_ERR;
2405 			break;
2406 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
2407 			con_log(CL_ANN, (CE_WARN,
2408 			    "tbolt_complete_cmd: scsi_done with error"));
2409 
2410 			pkt->pkt_reason	= CMD_CMPLT;
2411 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2412 
2413 			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2414 				con_log(CL_ANN,
2415 				    (CE_WARN, "TEST_UNIT_READY fail"));
2416 			} else {
2417 				pkt->pkt_state |= STATE_ARQ_DONE;
2418 				arqstat = (void *)(pkt->pkt_scbp);
2419 				arqstat->sts_rqpkt_reason = CMD_CMPLT;
2420 				arqstat->sts_rqpkt_resid = 0;
2421 				arqstat->sts_rqpkt_state |=
2422 				    STATE_GOT_BUS | STATE_GOT_TARGET
2423 				    | STATE_SENT_CMD
2424 				    | STATE_XFERRED_DATA;
2425 				*(uint8_t *)&arqstat->sts_rqpkt_status =
2426 				    STATUS_GOOD;
2427 				con_log(CL_ANN1,
2428 				    (CE_NOTE, "Copying Sense data %x",
2429 				    cmd->SMID));
2430 
2431 				ddi_rep_get8(acc_handle,
2432 				    (uint8_t *)&(arqstat->sts_sensedata),
2433 				    cmd->sense1,
2434 				    sizeof (struct scsi_extended_sense),
2435 				    DDI_DEV_AUTOINCR);
2436 
2437 			}
2438 			break;
2439 		case MFI_STAT_LD_OFFLINE:
2440 			dev_err(instance->dip, CE_WARN,
2441 			    "tbolt_complete_cmd: ld offline "
2442 			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2443 			    /* UNDO: */
2444 			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2445 
2446 			    ddi_get16(acc_handle,
2447 			    &scsi_raid_io->RaidContext.ldTargetId),
2448 
2449 			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2450 
2451 			pkt->pkt_reason	= CMD_DEV_GONE;
2452 			pkt->pkt_statistics  = STAT_DISCON;
2453 			break;
2454 		case MFI_STAT_DEVICE_NOT_FOUND:
2455 			con_log(CL_ANN, (CE_CONT,
2456 			    "tbolt_complete_cmd: device not found error"));
2457 			pkt->pkt_reason	= CMD_DEV_GONE;
2458 			pkt->pkt_statistics  = STAT_DISCON;
2459 			break;
2460 
2461 		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2462 			pkt->pkt_state |= STATE_ARQ_DONE;
2463 			pkt->pkt_reason	= CMD_CMPLT;
2464 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2465 
2466 			arqstat = (void *)(pkt->pkt_scbp);
2467 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
2468 			arqstat->sts_rqpkt_resid = 0;
2469 			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2470 			    | STATE_GOT_TARGET | STATE_SENT_CMD
2471 			    | STATE_XFERRED_DATA;
2472 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2473 
2474 			arqstat->sts_sensedata.es_valid = 1;
2475 			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2476 			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2477 
2478 			/*
2479 			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2480 			 * ASC: 0x21h; ASCQ: 0x00h;
2481 			 */
2482 			arqstat->sts_sensedata.es_add_code = 0x21;
2483 			arqstat->sts_sensedata.es_qual_code = 0x00;
2484 			break;
2485 		case MFI_STAT_INVALID_CMD:
2486 		case MFI_STAT_INVALID_DCMD:
2487 		case MFI_STAT_INVALID_PARAMETER:
2488 		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2489 		default:
2490 			dev_err(instance->dip, CE_WARN,
2491 			    "tbolt_complete_cmd: Unknown status!");
2492 			pkt->pkt_reason	= CMD_TRAN_ERR;
2493 
2494 			break;
2495 		}
2496 
2497 		atomic_add_16(&instance->fw_outstanding, (-1));
2498 
2499 		(void) mrsas_common_check(instance, cmd);
2500 		if (acmd->cmd_dmahandle) {
2501 			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2502 			    DDI_SUCCESS) {
2503 				ddi_fm_service_impact(instance->dip,
2504 				    DDI_SERVICE_UNAFFECTED);
2505 				pkt->pkt_reason = CMD_TRAN_ERR;
2506 				pkt->pkt_statistics = 0;
2507 			}
2508 		}
2509 
2510 		/* Call the callback routine */
2511 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2512 			(*pkt->pkt_comp)(pkt);
2513 
2514 		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2515 
2516 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2517 
2518 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2519 
2520 		return_raid_msg_pkt(instance, cmd);
2521 		break;
2522 	}
2523 	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	 /* MFA command. */
2524 
2525 		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2526 		    cmd->frame->dcmd.mbox.b[1] == 1) {
2527 
2528 			mutex_enter(&instance->sync_map_mtx);
2529 
2530 			con_log(CL_ANN, (CE_NOTE,
2531 			    "LDMAP sync command	SMID RECEIVED 0x%X",
2532 			    cmd->SMID));
2533 			if (cmd->frame->hdr.cmd_status != 0) {
2534 				dev_err(instance->dip, CE_WARN,
2535 				    "map sync failed, status = 0x%x.",
2536 				    cmd->frame->hdr.cmd_status);
2537 			} else {
2538 				instance->map_id++;
2539 				con_log(CL_ANN1, (CE_NOTE,
2540 				    "map sync received, switched map_id to %"
2541 				    PRIu64, instance->map_id));
2542 			}
2543 
2544 			if (MR_ValidateMapInfo(
2545 			    instance->ld_map[instance->map_id & 1],
2546 			    instance->load_balance_info)) {
2547 				instance->fast_path_io = 1;
2548 			} else {
2549 				instance->fast_path_io = 0;
2550 			}
2551 
2552 			con_log(CL_ANN, (CE_NOTE,
2553 			    "instance->fast_path_io %d",
2554 			    instance->fast_path_io));
2555 
2556 			instance->unroll.syncCmd = 0;
2557 
2558 			if (instance->map_update_cmd == cmd) {
2559 				return_raid_msg_pkt(instance, cmd);
2560 				atomic_add_16(&instance->fw_outstanding, (-1));
2561 				(void) mrsas_tbolt_sync_map_info(instance);
2562 			}
2563 
2564 			con_log(CL_ANN1, (CE_NOTE,
2565 			    "LDMAP sync completed, ldcount=%d",
2566 			    instance->ld_map[instance->map_id & 1]
2567 			    ->raidMap.ldCount));
2568 			mutex_exit(&instance->sync_map_mtx);
2569 			break;
2570 		}
2571 
2572 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2573 			con_log(CL_ANN1, (CE_CONT,
2574 			    "AEN command SMID RECEIVED 0x%X",
2575 			    cmd->SMID));
2576 			if ((instance->aen_cmd == cmd) &&
2577 			    (instance->aen_cmd->abort_aen)) {
2578 				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2579 				    "aborted_aen returned"));
2580 			} else {
2581 				atomic_add_16(&instance->fw_outstanding, (-1));
2582 				service_mfi_aen(instance, cmd);
2583 			}
2584 		}
2585 
2586 		if (cmd->sync_cmd == MRSAS_TRUE) {
2587 			con_log(CL_ANN1, (CE_CONT,
2588 			    "Sync-mode Command Response SMID RECEIVED 0x%X",
2589 			    cmd->SMID));
2590 
2591 			tbolt_complete_cmd_in_sync_mode(instance, cmd);
2592 		} else {
2593 			con_log(CL_ANN, (CE_CONT,
2594 			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2595 			    cmd->SMID));
2596 		}
2597 		break;
2598 	default:
2599 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2600 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2601 
2602 		/* free message */
2603 		con_log(CL_ANN,
2604 		    (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2605 		break;
2606 	}
2607 }
2608 
2609 uint_t
2610 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2611 {
2612 	uint8_t				replyType;
2613 	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2614 	Mpi2ReplyDescriptorsUnion_t	*desc;
2615 	uint16_t			smid;
2616 	union desc_value		d_val;
2617 	struct mrsas_cmd		*cmd;
2618 
2619 	struct mrsas_header	*hdr;
2620 	struct scsi_pkt		*pkt;
2621 
2622 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2623 	    0, 0, DDI_DMA_SYNC_FORDEV);
2624 
2625 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2626 	    0, 0, DDI_DMA_SYNC_FORCPU);
2627 
2628 	desc = instance->reply_frame_pool;
2629 	desc += instance->reply_read_index;
2630 
2631 	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2632 	replyType = replyDesc->ReplyFlags &
2633 	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2634 
2635 	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2636 		return (DDI_INTR_UNCLAIMED);
2637 
2638 	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2639 	    != DDI_SUCCESS) {
2640 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2641 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2642 		con_log(CL_ANN1,
2643 		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2644 		    "FMA check, returning DDI_INTR_UNCLAIMED"));
2645 		return (DDI_INTR_CLAIMED);
2646 	}
2647 
2648 	con_log(CL_ANN1, (CE_NOTE, "Reply Desc	= %p  Words = %" PRIx64,
2649 	    (void *)desc, desc->Words));
2650 
2651 	d_val.word = desc->Words;
2652 
2653 
2654 	/* Read Reply descriptor */
2655 	while ((d_val.u1.low != 0xffffffff) &&
2656 	    (d_val.u1.high != 0xffffffff)) {
2657 
2658 		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2659 		    0, 0, DDI_DMA_SYNC_FORCPU);
2660 
2661 		smid = replyDesc->SMID;
2662 
2663 		if (!smid || smid > instance->max_fw_cmds + 1) {
2664 			con_log(CL_ANN1, (CE_NOTE,
2665 			    "Reply Desc at Break  = %p	Words = %" PRIx64,
2666 			    (void *)desc, desc->Words));
2667 			break;
2668 		}
2669 
2670 		cmd	= instance->cmd_list[smid - 1];
2671 		if (!cmd) {
2672 			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2673 			    "outstanding_cmd: Invalid command "
2674 			    " or Poll commad Received in completion path"));
2675 		} else {
2676 			mutex_enter(&instance->cmd_pend_mtx);
2677 			if (cmd->sync_cmd == MRSAS_TRUE) {
2678 				hdr = (struct mrsas_header *)&cmd->frame->hdr;
2679 				if (hdr) {
2680 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2681 					    "tbolt_process_outstanding_cmd:"
2682 					    " mlist_del_init(&cmd->list)."));
2683 					mlist_del_init(&cmd->list);
2684 				}
2685 			} else {
2686 				pkt = cmd->pkt;
2687 				if (pkt) {
2688 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2689 					    "tbolt_process_outstanding_cmd:"
2690 					    "mlist_del_init(&cmd->list)."));
2691 					mlist_del_init(&cmd->list);
2692 				}
2693 			}
2694 
2695 			mutex_exit(&instance->cmd_pend_mtx);
2696 
2697 			tbolt_complete_cmd(instance, cmd);
2698 		}
2699 		/* set it back to all 1s. */
2700 		desc->Words = -1LL;
2701 
2702 		instance->reply_read_index++;
2703 
2704 		if (instance->reply_read_index >= (instance->reply_q_depth)) {
2705 			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2706 			instance->reply_read_index = 0;
2707 		}
2708 
2709 		/* Get the next reply descriptor */
2710 		if (!instance->reply_read_index)
2711 			desc = instance->reply_frame_pool;
2712 		else
2713 			desc++;
2714 
2715 		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2716 
2717 		d_val.word = desc->Words;
2718 
2719 		con_log(CL_ANN1, (CE_NOTE,
2720 		    "Next Reply Desc  = %p Words = %" PRIx64,
2721 		    (void *)desc, desc->Words));
2722 
2723 		replyType = replyDesc->ReplyFlags &
2724 		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2725 
2726 		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2727 			break;
2728 
2729 	} /* End of while loop. */
2730 
2731 	/* update replyIndex to FW */
2732 	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2733 
2734 
2735 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2736 	    0, 0, DDI_DMA_SYNC_FORDEV);
2737 
2738 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2739 	    0, 0, DDI_DMA_SYNC_FORCPU);
2740 	return (DDI_INTR_CLAIMED);
2741 }
2742 
2743 
2744 
2745 
2746 /*
2747  * complete_cmd_in_sync_mode -	Completes an internal command
2748  * @instance:			Adapter soft state
2749  * @cmd:			Command to be completed
2750  *
2751  * The issue_cmd_in_sync_mode() function waits for a command to complete
2752  * after it issues a command. This function wakes up that waiting routine by
2753  * calling wake_up() on the wait queue.
2754  */
2755 void
2756 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2757     struct mrsas_cmd *cmd)
2758 {
2759 
2760 	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2761 	    &cmd->frame->io.cmd_status);
2762 
2763 	cmd->sync_cmd = MRSAS_FALSE;
2764 
2765 	mutex_enter(&instance->int_cmd_mtx);
2766 	if (cmd->cmd_status == ENODATA) {
2767 		cmd->cmd_status = 0;
2768 	}
2769 	cv_broadcast(&instance->int_cmd_cv);
2770 	mutex_exit(&instance->int_cmd_mtx);
2771 
2772 }
2773 
2774 /*
2775  * mrsas_tbolt_get_ld_map_info -	Returns	 ld_map structure
2776  * instance:				Adapter soft state
2777  *
2778  * Issues an internal command (DCMD) to get the FW's controller PD
2779  * list structure.  This information is mainly used to find out SYSTEM
2780  * supported by the FW.
2781  */
2782 int
2783 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2784 {
2785 	int ret = 0;
2786 	struct mrsas_cmd	*cmd = NULL;
2787 	struct mrsas_dcmd_frame	*dcmd;
2788 	MR_FW_RAID_MAP_ALL *ci;
2789 	uint32_t ci_h = 0;
2790 	U32 size_map_info;
2791 
2792 	cmd = get_raid_msg_pkt(instance);
2793 
2794 	if (cmd == NULL) {
2795 		dev_err(instance->dip, CE_WARN,
2796 		    "Failed to get a cmd from free-pool in get_ld_map_info()");
2797 		return (DDI_FAILURE);
2798 	}
2799 
2800 	dcmd = &cmd->frame->dcmd;
2801 
2802 	size_map_info =	sizeof (MR_FW_RAID_MAP) +
2803 	    (sizeof (MR_LD_SPAN_MAP) *
2804 	    (MAX_LOGICAL_DRIVES - 1));
2805 
2806 	con_log(CL_ANN, (CE_NOTE,
2807 	    "size_map_info : 0x%x", size_map_info));
2808 
2809 	ci = instance->ld_map[instance->map_id & 1];
2810 	ci_h = instance->ld_map_phy[instance->map_id & 1];
2811 
2812 	if (!ci) {
2813 		dev_err(instance->dip, CE_WARN,
2814 		    "Failed to alloc mem for ld_map_info");
2815 		return_raid_msg_pkt(instance, cmd);
2816 		return (-1);
2817 	}
2818 
2819 	bzero(ci, sizeof (*ci));
2820 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2821 
2822 	dcmd->cmd = MFI_CMD_OP_DCMD;
2823 	dcmd->cmd_status = 0xFF;
2824 	dcmd->sge_count = 1;
2825 	dcmd->flags = MFI_FRAME_DIR_READ;
2826 	dcmd->timeout = 0;
2827 	dcmd->pad_0 = 0;
2828 	dcmd->data_xfer_len = size_map_info;
2829 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2830 	dcmd->sgl.sge32[0].phys_addr = ci_h;
2831 	dcmd->sgl.sge32[0].length = size_map_info;
2832 
2833 
2834 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2835 
2836 	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2837 		ret = 0;
2838 		con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2839 	} else {
2840 		dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2841 		ret = -1;
2842 	}
2843 
2844 	return_raid_msg_pkt(instance, cmd);
2845 
2846 	return (ret);
2847 }
2848 
2849 void
2850 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2851 {
2852 	uint32_t i;
2853 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2854 	union desc_value d_val;
2855 
2856 	reply_desc = instance->reply_frame_pool;
2857 
2858 	for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2859 		d_val.word = reply_desc->Words;
2860 		con_log(CL_DLEVEL3, (CE_NOTE,
2861 		    "i=%d, %x:%x",
2862 		    i, d_val.u1.high, d_val.u1.low));
2863 	}
2864 }
2865 
2866 /*
2867  * mrsas_tbolt_command_create -	Create command for fast path.
2868  * @io_info:	MegaRAID IO request packet pointer.
2869  * @ref_tag:	Reference tag for RD/WRPROTECT
2870  *
2871  * Create the command for fast path.
2872  */
2873 void
2874 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2875     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2876     U32 ref_tag)
2877 {
2878 	uint16_t		EEDPFlags;
2879 	uint32_t		Control;
2880 	ddi_acc_handle_t acc_handle =
2881 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2882 
2883 	/* Prepare 32-byte CDB if DIF is supported on this device */
2884 	con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2885 
2886 	bzero(cdb, 32);
2887 
2888 	cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2889 
2890 
2891 	cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2892 
2893 	if (io_info->isRead)
2894 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2895 	else
2896 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2897 
2898 	/* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2899 	cdb[10] = MRSAS_RD_WR_PROTECT;
2900 
2901 	/* LOGICAL BLOCK ADDRESS */
2902 	cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2903 	cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2904 	cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2905 	cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2906 	cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2907 	cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2908 	cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2909 	cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2910 
2911 	/* Logical block reference tag */
2912 	ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2913 	    BE_32(ref_tag));
2914 
2915 	ddi_put16(acc_handle,
2916 	    &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2917 
2918 	ddi_put32(acc_handle, &scsi_io_request->DataLength,
2919 	    ((io_info->numBlocks)*512));
2920 	/* Specify 32-byte cdb */
2921 	ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2922 
2923 	/* Transfer length */
2924 	cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2925 	cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2926 	cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2927 	cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2928 
2929 	/* set SCSI IO EEDPFlags */
2930 	EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2931 	Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2932 
2933 	/* set SCSI IO EEDPFlags bits */
2934 	if (io_info->isRead) {
2935 		/*
2936 		 * For READ commands, the EEDPFlags shall be set to specify to
2937 		 * Increment the Primary Reference Tag, to Check the Reference
2938 		 * Tag, and to Check and Remove the Protection Information
2939 		 * fields.
2940 		 */
2941 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2942 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG	|
2943 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	|
2944 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG	|
2945 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2946 	} else {
2947 		/*
2948 		 * For WRITE commands, the EEDPFlags shall be set to specify to
2949 		 * Increment the Primary Reference Tag, and to Insert
2950 		 * Protection Information fields.
2951 		 */
2952 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2953 		    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2954 	}
2955 	Control |= (0x4 << 26);
2956 
2957 	ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2958 	ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2959 	ddi_put32(acc_handle,
2960 	    &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2961 }
2962 
2963 
2964 /*
2965  * mrsas_tbolt_set_pd_lba -	Sets PD LBA
2966  * @cdb:		CDB
2967  * @cdb_len:		cdb length
2968  * @start_blk:		Start block of IO
2969  *
2970  * Used to set the PD LBA in CDB for FP IOs
2971  */
2972 static void
2973 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2974     U32 num_blocks)
2975 {
2976 	U8 cdb_len = *cdb_len_ptr;
2977 	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2978 
2979 	/* Some drives don't support 16/12 byte CDB's, convert to 10 */
2980 	if (((cdb_len == 12) || (cdb_len == 16)) &&
2981 	    (start_blk <= 0xffffffff)) {
2982 		if (cdb_len == 16) {
2983 			con_log(CL_ANN,
2984 			    (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
2985 			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2986 			flagvals = cdb[1];
2987 			groupnum = cdb[14];
2988 			control = cdb[15];
2989 		} else {
2990 			con_log(CL_ANN,
2991 			    (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
2992 			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2993 			flagvals = cdb[1];
2994 			groupnum = cdb[10];
2995 			control = cdb[11];
2996 		}
2997 
2998 		bzero(cdb, sizeof (cdb));
2999 
3000 		cdb[0] = opcode;
3001 		cdb[1] = flagvals;
3002 		cdb[6] = groupnum;
3003 		cdb[9] = control;
3004 		/* Set transfer length */
3005 		cdb[8] = (U8)(num_blocks & 0xff);
3006 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3007 		cdb_len = 10;
3008 	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3009 		/* Convert to 16 byte CDB for large LBA's */
3010 		con_log(CL_ANN,
3011 		    (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3012 		switch (cdb_len) {
3013 		case 6:
3014 			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3015 			control = cdb[5];
3016 			break;
3017 		case 10:
3018 			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3019 			flagvals = cdb[1];
3020 			groupnum = cdb[6];
3021 			control = cdb[9];
3022 			break;
3023 		case 12:
3024 			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3025 			flagvals = cdb[1];
3026 			groupnum = cdb[10];
3027 			control = cdb[11];
3028 			break;
3029 		}
3030 
3031 		bzero(cdb, sizeof (cdb));
3032 
3033 		cdb[0] = opcode;
3034 		cdb[1] = flagvals;
3035 		cdb[14] = groupnum;
3036 		cdb[15] = control;
3037 
3038 		/* Transfer length */
3039 		cdb[13] = (U8)(num_blocks & 0xff);
3040 		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3041 		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3042 		cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3043 
3044 		/* Specify 16-byte cdb */
3045 		cdb_len = 16;
3046 	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3047 		/* convert to 10 byte CDB */
3048 		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3049 		control = cdb[5];
3050 
3051 		bzero(cdb, sizeof (cdb));
3052 		cdb[0] = opcode;
3053 		cdb[9] = control;
3054 
3055 		/* Set transfer length */
3056 		cdb[8] = (U8)(num_blocks & 0xff);
3057 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3058 
3059 		/* Specify 10-byte cdb */
3060 		cdb_len = 10;
3061 	}
3062 
3063 
3064 	/* Fall through Normal case, just load LBA here */
3065 	switch (cdb_len) {
3066 	case 6:
3067 	{
3068 		U8 val = cdb[1] & 0xE0;
3069 		cdb[3] = (U8)(start_blk & 0xff);
3070 		cdb[2] = (U8)((start_blk >> 8) & 0xff);
3071 		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3072 		break;
3073 	}
3074 	case 10:
3075 		cdb[5] = (U8)(start_blk & 0xff);
3076 		cdb[4] = (U8)((start_blk >> 8) & 0xff);
3077 		cdb[3] = (U8)((start_blk >> 16) & 0xff);
3078 		cdb[2] = (U8)((start_blk >> 24) & 0xff);
3079 		break;
3080 	case 12:
3081 		cdb[5]	  = (U8)(start_blk & 0xff);
3082 		cdb[4]	  = (U8)((start_blk >> 8) & 0xff);
3083 		cdb[3]	  = (U8)((start_blk >> 16) & 0xff);
3084 		cdb[2]	  = (U8)((start_blk >> 24) & 0xff);
3085 		break;
3086 
3087 	case 16:
3088 		cdb[9]	= (U8)(start_blk & 0xff);
3089 		cdb[8]	= (U8)((start_blk >> 8) & 0xff);
3090 		cdb[7]	= (U8)((start_blk >> 16) & 0xff);
3091 		cdb[6]	= (U8)((start_blk >> 24) & 0xff);
3092 		cdb[5]	= (U8)((start_blk >> 32) & 0xff);
3093 		cdb[4]	= (U8)((start_blk >> 40) & 0xff);
3094 		cdb[3]	= (U8)((start_blk >> 48) & 0xff);
3095 		cdb[2]	= (U8)((start_blk >> 56) & 0xff);
3096 		break;
3097 	}
3098 
3099 	*cdb_len_ptr = cdb_len;
3100 }
3101 
3102 
3103 static int
3104 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3105 {
3106 	MR_FW_RAID_MAP_ALL *ld_map;
3107 
3108 	if (!mrsas_tbolt_get_ld_map_info(instance)) {
3109 
3110 		ld_map = instance->ld_map[instance->map_id & 1];
3111 
3112 		con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3113 		    ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3114 
3115 		if (MR_ValidateMapInfo(
3116 		    instance->ld_map[instance->map_id & 1],
3117 		    instance->load_balance_info)) {
3118 			con_log(CL_ANN,
3119 			    (CE_CONT, "MR_ValidateMapInfo success"));
3120 
3121 			instance->fast_path_io = 1;
3122 			con_log(CL_ANN,
3123 			    (CE_NOTE, "instance->fast_path_io %d",
3124 			    instance->fast_path_io));
3125 
3126 			return (DDI_SUCCESS);
3127 		}
3128 
3129 	}
3130 
3131 	instance->fast_path_io = 0;
3132 	dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3133 	con_log(CL_ANN, (CE_NOTE,
3134 	    "instance->fast_path_io %d", instance->fast_path_io));
3135 
3136 	return (DDI_FAILURE);
3137 }
3138 
3139 /*
3140  * Marks HBA as bad. This will be called either when an
3141  * IO packet times out even after 3 FW resets
3142  * or FW is found to be fault even after 3 continuous resets.
3143  */
3144 
3145 void
3146 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3147 {
3148 	dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3149 
3150 	if (instance->deadadapter == 1)
3151 		return;
3152 
3153 	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3154 	    "Writing to doorbell with MFI_STOP_ADP "));
3155 	mutex_enter(&instance->ocr_flags_mtx);
3156 	instance->deadadapter = 1;
3157 	mutex_exit(&instance->ocr_flags_mtx);
3158 	instance->func_ptr->disable_intr(instance);
3159 	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3160 	/* Flush */
3161 	(void) RD_RESERVED0_REGISTER(instance);
3162 
3163 	(void) mrsas_print_pending_cmds(instance);
3164 	(void) mrsas_complete_pending_cmds(instance);
3165 }
3166 
3167 void
3168 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3169 {
3170 	int i;
3171 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3172 	instance->reply_read_index = 0;
3173 
3174 	/* initializing reply address to 0xFFFFFFFF */
3175 	reply_desc = instance->reply_frame_pool;
3176 
3177 	for (i = 0; i < instance->reply_q_depth; i++) {
3178 		reply_desc->Words = (uint64_t)~0;
3179 		reply_desc++;
3180 	}
3181 }
3182 
3183 int
3184 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3185 {
3186 	uint32_t status = 0x00;
3187 	uint32_t retry = 0;
3188 	uint32_t cur_abs_reg_val;
3189 	uint32_t fw_state;
3190 	uint32_t abs_state;
3191 	uint32_t i;
3192 
3193 	con_log(CL_ANN, (CE_NOTE,
3194 	    "mrsas_tbolt_reset_ppc entered"));
3195 
3196 	if (instance->deadadapter == 1) {
3197 		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3198 		    "no more resets as HBA has been marked dead ");
3199 		return (DDI_FAILURE);
3200 	}
3201 
3202 	mutex_enter(&instance->ocr_flags_mtx);
3203 	instance->adapterresetinprogress = 1;
3204 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
3205 	    "adpterresetinprogress flag set, time %llx", gethrtime()));
3206 	mutex_exit(&instance->ocr_flags_mtx);
3207 
3208 	instance->func_ptr->disable_intr(instance);
3209 
3210 	/* Add delay inorder to complete the ioctl & io cmds in-flight */
3211 	for (i = 0; i < 3000; i++) {
3212 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3213 	}
3214 
3215 	instance->reply_read_index = 0;
3216 
3217 retry_reset:
3218 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3219 	    ":Resetting TBOLT "));
3220 
3221 	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3222 	WR_TBOLT_IB_WRITE_SEQ(4, instance);
3223 	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3224 	WR_TBOLT_IB_WRITE_SEQ(2, instance);
3225 	WR_TBOLT_IB_WRITE_SEQ(7, instance);
3226 	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3227 	con_log(CL_ANN1, (CE_NOTE,
3228 	    "mrsas_tbolt_reset_ppc: magic number written "
3229 	    "to write sequence register"));
3230 	delay(100 * drv_usectohz(MILLISEC));
3231 	status = RD_TBOLT_HOST_DIAG(instance);
3232 	con_log(CL_ANN1, (CE_NOTE,
3233 	    "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
3234 	    "to write sequence register"));
3235 
3236 	while (status & DIAG_TBOLT_RESET_ADAPTER) {
3237 		delay(100 * drv_usectohz(MILLISEC));
3238 		status = RD_TBOLT_HOST_DIAG(instance);
3239 		if (retry++ == 100) {
3240 			dev_err(instance->dip, CE_WARN,
3241 			    "mrsas_tbolt_reset_ppc:"
3242 			    "resetadapter bit is set already "
3243 			    "check retry count %d", retry);
3244 			return (DDI_FAILURE);
3245 		}
3246 	}
3247 
3248 	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3249 	delay(100 * drv_usectohz(MILLISEC));
3250 
3251 	ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3252 	    (uint8_t *)((uintptr_t)(instance)->regmap +
3253 	    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3254 
3255 	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3256 		delay(100 * drv_usectohz(MILLISEC));
3257 		ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3258 		    (uint8_t *)((uintptr_t)(instance)->regmap +
3259 		    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3260 		if (retry++ == 100) {
3261 			/* Dont call kill adapter here */
3262 			/* RESET BIT ADAPTER is cleared by firmare */
3263 			/* mrsas_tbolt_kill_adapter(instance); */
3264 			dev_err(instance->dip, CE_WARN,
3265 			    "%s(): RESET FAILED; return failure!!!", __func__);
3266 			return (DDI_FAILURE);
3267 		}
3268 	}
3269 
3270 	con_log(CL_ANN,
3271 	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3272 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3273 	    "Calling mfi_state_transition_to_ready"));
3274 
3275 	abs_state = instance->func_ptr->read_fw_status_reg(instance);
3276 	retry = 0;
3277 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3278 		delay(100 * drv_usectohz(MILLISEC));
3279 		abs_state = instance->func_ptr->read_fw_status_reg(instance);
3280 	}
3281 	if (abs_state <= MFI_STATE_FW_INIT) {
3282 		dev_err(instance->dip, CE_WARN,
3283 		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3284 		    "state = 0x%x, RETRY RESET.", abs_state);
3285 		goto retry_reset;
3286 	}
3287 
3288 	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
3289 	if (mfi_state_transition_to_ready(instance) ||
3290 	    debug_tbolt_fw_faults_after_ocr_g == 1) {
3291 		cur_abs_reg_val =
3292 		    instance->func_ptr->read_fw_status_reg(instance);
3293 		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;
3294 
3295 		con_log(CL_ANN1, (CE_NOTE,
3296 		    "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3297 		    "FW state = 0x%x", fw_state));
3298 		if (debug_tbolt_fw_faults_after_ocr_g == 1)
3299 			fw_state = MFI_STATE_FAULT;
3300 
3301 		con_log(CL_ANN,
3302 		    (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
3303 		    "FW state = 0x%x", fw_state));
3304 
3305 		if (fw_state == MFI_STATE_FAULT) {
3306 			/* increment the count */
3307 			instance->fw_fault_count_after_ocr++;
3308 			if (instance->fw_fault_count_after_ocr
3309 			    < MAX_FW_RESET_COUNT) {
3310 				dev_err(instance->dip, CE_WARN,
3311 				    "mrsas_tbolt_reset_ppc: "
3312 				    "FW is in fault after OCR count %d "
3313 				    "Retry Reset",
3314 				    instance->fw_fault_count_after_ocr);
3315 				goto retry_reset;
3316 
3317 			} else {
3318 				dev_err(instance->dip, CE_WARN, "%s:"
3319 				    "Max Reset Count exceeded >%d"
3320 				    "Mark HBA as bad, KILL adapter",
3321 				    __func__, MAX_FW_RESET_COUNT);
3322 
3323 				mrsas_tbolt_kill_adapter(instance);
3324 				return (DDI_FAILURE);
3325 			}
3326 		}
3327 	}
3328 
3329 	/* reset the counter as FW is up after OCR */
3330 	instance->fw_fault_count_after_ocr = 0;
3331 
3332 	mrsas_reset_reply_desc(instance);
3333 
3334 
3335 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3336 	    "Calling mrsas_issue_init_mpi2"));
3337 	abs_state = mrsas_issue_init_mpi2(instance);
3338 	if (abs_state == (uint32_t)DDI_FAILURE) {
3339 		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3340 		    "INIT failed Retrying Reset");
3341 		goto retry_reset;
3342 	}
3343 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3344 	    "mrsas_issue_init_mpi2 Done"));
3345 
3346 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3347 	    "Calling mrsas_print_pending_cmd"));
3348 	(void) mrsas_print_pending_cmds(instance);
3349 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3350 	    "mrsas_print_pending_cmd done"));
3351 
3352 	instance->func_ptr->enable_intr(instance);
3353 	instance->fw_outstanding = 0;
3354 
3355 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3356 	    "Calling mrsas_issue_pending_cmds"));
3357 	(void) mrsas_issue_pending_cmds(instance);
3358 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3359 	"issue_pending_cmds done."));
3360 
3361 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3362 	    "Calling aen registration"));
3363 
3364 	instance->aen_cmd->retry_count_for_ocr = 0;
3365 	instance->aen_cmd->drv_pkt_time = 0;
3366 
3367 	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3368 
3369 	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
3370 	mutex_enter(&instance->ocr_flags_mtx);
3371 	instance->adapterresetinprogress = 0;
3372 	mutex_exit(&instance->ocr_flags_mtx);
3373 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3374 	    "adpterresetinprogress flag unset"));
3375 
3376 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3377 	return (DDI_SUCCESS);
3378 
3379 }
3380 
3381 
3382 /*
3383  * mrsas_sync_map_info -	Returns FW's ld_map structure
3384  * @instance:				Adapter soft state
3385  *
3386  * Issues an internal command (DCMD) to get the FW's controller PD
3387  * list structure.  This information is mainly used to find out SYSTEM
3388  * supported by the FW.
3389  */
3390 
3391 static int
3392 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3393 {
3394 	int			ret = 0, i;
3395 	struct mrsas_cmd	*cmd = NULL;
3396 	struct mrsas_dcmd_frame	*dcmd;
3397 	uint32_t size_sync_info, num_lds;
3398 	LD_TARGET_SYNC *ci = NULL;
3399 	MR_FW_RAID_MAP_ALL *map;
3400 	MR_LD_RAID  *raid;
3401 	LD_TARGET_SYNC *ld_sync;
3402 	uint32_t ci_h = 0;
3403 	uint32_t size_map_info;
3404 
3405 	cmd = get_raid_msg_pkt(instance);
3406 
3407 	if (cmd == NULL) {
3408 		dev_err(instance->dip, CE_WARN,
3409 		    "Failed to get a cmd from free-pool in "
3410 		    "mrsas_tbolt_sync_map_info().");
3411 		return (DDI_FAILURE);
3412 	}
3413 
3414 	/* Clear the frame buffer and assign back the context id */
3415 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3416 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3417 	    cmd->index);
3418 	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3419 
3420 
3421 	map = instance->ld_map[instance->map_id & 1];
3422 
3423 	num_lds = map->raidMap.ldCount;
3424 
3425 	dcmd = &cmd->frame->dcmd;
3426 
3427 	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3428 
3429 	con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3430 	    size_sync_info, num_lds));
3431 
3432 	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3433 
3434 	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3435 	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3436 
3437 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3438 
3439 	ld_sync = (LD_TARGET_SYNC *)ci;
3440 
3441 	for (i = 0; i < num_lds; i++, ld_sync++) {
3442 		raid = MR_LdRaidGet(i, map);
3443 
3444 		con_log(CL_ANN1,
3445 		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3446 		    i, raid->seqNum, raid->flags.ldSyncRequired));
3447 
3448 		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3449 
3450 		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3451 		    i, ld_sync->ldTargetId));
3452 
3453 		ld_sync->seqNum = raid->seqNum;
3454 	}
3455 
3456 
3457 	size_map_info = sizeof (MR_FW_RAID_MAP) +
3458 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3459 
3460 	dcmd->cmd = MFI_CMD_OP_DCMD;
3461 	dcmd->cmd_status = 0xFF;
3462 	dcmd->sge_count = 1;
3463 	dcmd->flags = MFI_FRAME_DIR_WRITE;
3464 	dcmd->timeout = 0;
3465 	dcmd->pad_0 = 0;
3466 	dcmd->data_xfer_len = size_map_info;
3467 	ASSERT(num_lds <= 255);
3468 	dcmd->mbox.b[0] = (U8)num_lds;
3469 	dcmd->mbox.b[1] = 1; /* Pend */
3470 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3471 	dcmd->sgl.sge32[0].phys_addr = ci_h;
3472 	dcmd->sgl.sge32[0].length = size_map_info;
3473 
3474 
3475 	instance->map_update_cmd = cmd;
3476 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3477 
3478 	instance->func_ptr->issue_cmd(cmd, instance);
3479 
3480 	instance->unroll.syncCmd = 1;
3481 	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3482 
3483 	return (ret);
3484 }
3485 
3486 /*
3487  * abort_syncmap_cmd
3488  */
3489 int
3490 abort_syncmap_cmd(struct mrsas_instance *instance,
3491     struct mrsas_cmd *cmd_to_abort)
3492 {
3493 	int	ret = 0;
3494 
3495 	struct mrsas_cmd		*cmd;
3496 	struct mrsas_abort_frame	*abort_fr;
3497 
3498 	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3499 
3500 	cmd = get_raid_msg_mfi_pkt(instance);
3501 
3502 	if (!cmd) {
3503 		dev_err(instance->dip, CE_WARN,
3504 		    "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3505 		return (DDI_FAILURE);
3506 	}
3507 	/* Clear the frame buffer and assign back the context id */
3508 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3509 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3510 	    cmd->index);
3511 
3512 	abort_fr = &cmd->frame->abort;
3513 
3514 	/* prepare and issue the abort frame */
3515 	ddi_put8(cmd->frame_dma_obj.acc_handle,
3516 	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
3517 	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3518 	    MFI_CMD_STATUS_SYNC_MODE);
3519 	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3520 	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3521 	    cmd_to_abort->index);
3522 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3523 	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3524 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3525 	    &abort_fr->abort_mfi_phys_addr_hi, 0);
3526 
3527 	cmd->frame_count = 1;
3528 
3529 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3530 
3531 	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3532 		con_log(CL_ANN1, (CE_WARN,
3533 		    "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3534 		ret = -1;
3535 	} else {
3536 		ret = 0;
3537 	}
3538 
3539 	return_raid_msg_mfi_pkt(instance, cmd);
3540 
3541 	atomic_add_16(&instance->fw_outstanding, (-1));
3542 
3543 	return (ret);
3544 }
3545 
3546 
3547 #ifdef PDSUPPORT
3548 /*
3549  * Even though these functions were originally intended for 2208 only, it
3550  * turns out they're useful for "Skinny" support as well.  In a perfect world,
3551  * these two functions would be either in mr_sas.c, or in their own new source
3552  * file.  Since this driver needs some cleanup anyway, keep this portion in
3553  * mind as well.
3554  */
3555 
3556 int
3557 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3558     uint8_t lun, dev_info_t **ldip)
3559 {
3560 	struct scsi_device *sd;
3561 	dev_info_t *child;
3562 	int rval, dtype;
3563 	struct mrsas_tbolt_pd_info *pds = NULL;
3564 
3565 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3566 	    tgt, lun));
3567 
3568 	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3569 		if (ldip) {
3570 			*ldip = child;
3571 		}
3572 		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3573 			rval = mrsas_service_evt(instance, tgt, 1,
3574 			    MRSAS_EVT_UNCONFIG_TGT, NULL);
3575 			con_log(CL_ANN1, (CE_WARN,
3576 			    "mr_sas:DELETING STALE ENTRY  rval = %d "
3577 			    "tgt id = %d", rval, tgt));
3578 			return (NDI_FAILURE);
3579 		}
3580 		return (NDI_SUCCESS);
3581 	}
3582 
3583 	pds = (struct mrsas_tbolt_pd_info *)
3584 	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3585 	mrsas_tbolt_get_pd_info(instance, pds, tgt);
3586 	dtype = pds->scsiDevType;
3587 
3588 	/* Check for Disk */
3589 	if ((dtype == DTYPE_DIRECT)) {
3590 		if ((dtype == DTYPE_DIRECT) &&
3591 		    (LE_16(pds->fwState) != PD_SYSTEM)) {
3592 			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3593 			return (NDI_FAILURE);
3594 		}
3595 		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3596 		sd->sd_address.a_hba_tran = instance->tran;
3597 		sd->sd_address.a_target = (uint16_t)tgt;
3598 		sd->sd_address.a_lun = (uint8_t)lun;
3599 
3600 		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3601 			rval = mrsas_config_scsi_device(instance, sd, ldip);
3602 			dev_err(instance->dip, CE_CONT,
3603 			    "?Phys. device found: tgt %d dtype %d: %s\n",
3604 			    tgt, dtype, sd->sd_inq->inq_vid);
3605 		} else {
3606 			rval = NDI_FAILURE;
3607 			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3608 			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3609 			    tgt, dtype, sd->sd_inq->inq_vid));
3610 		}
3611 
3612 		/* sd_unprobe is blank now. Free buffer manually */
3613 		if (sd->sd_inq) {
3614 			kmem_free(sd->sd_inq, SUN_INQSIZE);
3615 			sd->sd_inq = (struct scsi_inquiry *)NULL;
3616 		}
3617 		kmem_free(sd, sizeof (struct scsi_device));
3618 	} else {
3619 		con_log(CL_ANN1, (CE_NOTE,
3620 		    "?Device not supported: tgt %d lun %d dtype %d",
3621 		    tgt, lun, dtype));
3622 		rval = NDI_FAILURE;
3623 	}
3624 
3625 	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3626 	con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3627 	    rval));
3628 	return (rval);
3629 }
3630 
3631 static void
3632 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3633     struct mrsas_tbolt_pd_info *pds, int tgt)
3634 {
3635 	struct mrsas_cmd	*cmd;
3636 	struct mrsas_dcmd_frame	*dcmd;
3637 	dma_obj_t		dcmd_dma_obj;
3638 
3639 	ASSERT(instance->tbolt || instance->skinny);
3640 
3641 	if (instance->tbolt)
3642 		cmd = get_raid_msg_pkt(instance);
3643 	else
3644 		cmd = mrsas_get_mfi_pkt(instance);
3645 
3646 	if (!cmd) {
3647 		con_log(CL_ANN1,
3648 		    (CE_WARN, "Failed to get a cmd for get pd info"));
3649 		return;
3650 	}
3651 
3652 	/* Clear the frame buffer and assign back the context id */
3653 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3654 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3655 	    cmd->index);
3656 
3657 
3658 	dcmd = &cmd->frame->dcmd;
3659 	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3660 	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3661 	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3662 	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3663 	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3664 	dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3665 
3666 	(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3667 	    DDI_STRUCTURE_LE_ACC);
3668 	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3669 	bzero(dcmd->mbox.b, 12);
3670 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3671 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3672 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3673 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3674 	    MFI_FRAME_DIR_READ);
3675 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3676 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3677 	    sizeof (struct mrsas_tbolt_pd_info));
3678 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3679 	    MR_DCMD_PD_GET_INFO);
3680 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3681 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3682 	    sizeof (struct mrsas_tbolt_pd_info));
3683 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3684 	    dcmd_dma_obj.dma_cookie[0].dmac_address);
3685 
3686 	cmd->sync_cmd = MRSAS_TRUE;
3687 	cmd->frame_count = 1;
3688 
3689 	if (instance->tbolt)
3690 		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3691 
3692 	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3693 
3694 	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3695 	    (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3696 	    DDI_DEV_AUTOINCR);
3697 	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3698 
3699 	if (instance->tbolt)
3700 		return_raid_msg_pkt(instance, cmd);
3701 	else
3702 		mrsas_return_mfi_pkt(instance, cmd);
3703 }
3704 #endif
3705