xref: /freebsd/sys/dev/smartpqi/smartpqi_init.c (revision 1d386b48)
1 /*-
2  * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 
27 #include "smartpqi_includes.h"
28 
29 /* 5 mins timeout for quiesce */
30 #define PQI_QUIESCE_TIMEOUT	300000
31 
32 /*
33  * Request the adapter to get PQI capabilities supported.
34  */
35 static int
36 pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
37 {
38 	int ret = PQI_STATUS_SUCCESS;
39 
40 	DBG_FUNC("IN\n");
41 
42 	gen_adm_req_iu_t	admin_req;
43 	gen_adm_resp_iu_t 	admin_resp;
44 	dma_mem_t		pqi_cap_dma_buf;
45 	pqi_dev_cap_t 		*capability = NULL;
46 	pqi_iu_layer_desc_t	*iu_layer_desc = NULL;
47 
48 	/* Allocate Non DMA memory */
49 	capability = os_mem_alloc(softs, sizeof(*capability));
50 	if (!capability) {
51 		DBG_ERR("Failed to allocate memory for capability\n");
52 		ret = PQI_STATUS_FAILURE;
53 		goto err_out;
54 	}
55 
56 	memset(&admin_req, 0, sizeof(admin_req));
57 	memset(&admin_resp, 0, sizeof(admin_resp));
58 
59 	memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
60 	pqi_cap_dma_buf.tag = "pqi_cap_buf";
61 	pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
62 	pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
63 
64 	ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
65 	if (ret) {
66 		DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
67 		goto err_dma_alloc;
68 	}
69 
70 	admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
71 	admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
72 	admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
73 	admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
74 	admin_req.req_type.general_func.sg_desc.type =	SGL_DESCRIPTOR_CODE_DATA_BLOCK;
75 
76 	ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
77 	if( PQI_STATUS_SUCCESS == ret) {
78                 memcpy(capability,
79 			pqi_cap_dma_buf.virt_addr,
80 			pqi_cap_dma_buf.size);
81 	} else {
82 		DBG_ERR("Failed to send admin req report pqi device capability\n");
83 		goto err_admin_req;
84 
85 	}
86 
87 	softs->pqi_dev_cap.max_iqs = capability->max_iqs;
88 	softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
89 	softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
90 	softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
91 	softs->pqi_dev_cap.max_oqs = capability->max_oqs;
92 	softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
93 	softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
94 	softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
95 
96 	iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
97 	softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
98 	softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
99 	softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
100 
101 	DBG_INIT("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
102 	DBG_INIT("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
103 	DBG_INIT("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
104 	DBG_INIT("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
105 	DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
106 	DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
107 	DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
108 	DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
109 	DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
110 	DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
111 	DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
112 
113 
114 	os_mem_free(softs, (void *)capability,
115 		    REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
116 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
117 
118 	DBG_FUNC("OUT\n");
119 	return ret;
120 
121 err_admin_req:
122 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
123 err_dma_alloc:
124 	if (capability)
125 		os_mem_free(softs, (void *)capability,
126 			    REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
127 err_out:
128 	DBG_FUNC("failed OUT\n");
129 	return PQI_STATUS_FAILURE;
130 }
131 
132 /*
133  * Function used to deallocate the used rcb.
134  */
135 void
136 pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
137 {
138 	uint32_t num_req;
139 	size_t size;
140 	int i;
141 
142 	DBG_FUNC("IN\n");
143 	num_req = softs->max_outstanding_io + 1;
144 	size = num_req * sizeof(rcb_t);
145 	for (i = 1; i < req_count; i++)
146 		os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
147 	os_mem_free(softs, (void *)softs->rcb, size);
148 	softs->rcb = NULL;
149 	DBG_FUNC("OUT\n");
150 }
151 
152 
153 /*
154  * Allocate memory for rcb and SG descriptors.
155  */
156 static int
157 pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
158 {
159 	int ret = PQI_STATUS_SUCCESS;
160 	int i = 0;
161 	uint32_t num_req = 0;
162 	uint32_t sg_buf_size = 0;
163 	uint64_t alloc_size = 0;
164 	rcb_t *rcb = NULL;
165 	rcb_t *prcb = NULL;
166 	DBG_FUNC("IN\n");
167 
168 	/* Set maximum outstanding requests */
169 	/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
170 	 * The rcb will be accessed by using the tag as index
171 	 * As 0 tag index is not used, we need to allocate one extra.
172 	 */
173 	softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
174 	num_req = softs->max_outstanding_io + 1;
175 	DBG_INIT("Max Outstanding IO reset to %d\n", num_req);
176 
177 	alloc_size = num_req * sizeof(rcb_t);
178 
179 	/* Allocate Non DMA memory */
180 	rcb = os_mem_alloc(softs, alloc_size);
181 	if (!rcb) {
182 		DBG_ERR("Failed to allocate memory for rcb\n");
183 		ret = PQI_STATUS_FAILURE;
184 		goto err_out;
185 	}
186 	softs->rcb = rcb;
187 
188 	/* Allocate sg dma memory for sg chain  */
189 	sg_buf_size = softs->pqi_cap.max_sg_elem *
190 			sizeof(sgt_t);
191 
192 	prcb = &softs->rcb[1];
193 	/* Initialize rcb */
194 	for(i=1; i < num_req; i++) {
195 		char tag[15];
196 		sprintf(tag, "sg_dma_buf%d", i);
197 		softs->sg_dma_desc[i].tag = tag;
198 		softs->sg_dma_desc[i].size = sg_buf_size;
199 		softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
200 
201 		ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
202 		if (ret) {
203 			DBG_ERR("Failed to Allocate sg desc %d\n", ret);
204 			ret = PQI_STATUS_FAILURE;
205 			goto error;
206 		}
207 		prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
208 		prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
209 		prcb ++;
210 	}
211 
212 	DBG_FUNC("OUT\n");
213 	return ret;
214 error:
215 	pqisrc_free_rcb(softs, i);
216 err_out:
217 	DBG_FUNC("failed OUT\n");
218 	return ret;
219 }
220 
/*
 * Function used to decide the operational queue configuration params
 * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
 *
 * Reads the limits cached by pqisrc_report_pqi_capability() and the
 * interrupt/CPU counts, and derives queue counts, element sizes, the
 * maximum inbound IU length, and the per-IU SG capacity.
 */
void
pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
{
	uint16_t total_iq_elements;

	DBG_FUNC("IN\n");

	DBG_INIT("softs->intr_count : %d  softs->num_cpus_online : %d",
		softs->intr_count, softs->num_cpus_online);

	/* With one vector or one CPU, a single OB queue doubles as the
	 * event queue. */
	if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
		/* Share the event and Operational queue. */
		softs->num_op_obq = 1;
		softs->share_opq_and_eventq = true;
	}
	else {
		/* Note :  One OBQ (OBQ0) reserved for event queue */
		softs->num_op_obq = MIN(softs->num_cpus_online,
					softs->intr_count) - 1;
		softs->share_opq_and_eventq = false;
	}
	/* If the available interrupt count is more than one,
	we dont need to share the interrupt for IO and event queue */
	if (softs->intr_count > 1)
		softs->share_opq_and_eventq = false;

	DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);

	/* One RAID and one AIO IB queue per OB queue. */
	softs->num_op_raid_ibq = softs->num_op_obq;
	softs->num_op_aio_ibq = softs->num_op_raid_ibq;
	/* Capability element lengths are scaled by 16 to get bytes
	 * (presumably 16-byte units per the PQI spec — confirm). */
	softs->ibq_elem_size =  softs->pqi_dev_cap.max_iq_elem_len * 16;
	softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
	if (softs->max_ib_iu_length_per_fw == 256 &&
	    softs->ob_spanning_supported) {
		/* older f/w that doesn't actually support spanning. */
		softs->max_ib_iu_length = softs->ibq_elem_size;
	} else {
		/* max. inbound IU length is an multiple of our inbound element size. */
		softs->max_ib_iu_length =
			(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
			 softs->ibq_elem_size;

	}
	/* If Max. Outstanding IO came with Max. Spanning element count then,
		needed elements per IO are multiplication of
		Max.Outstanding IO and  Max.Spanning element */
	/* NOTE(review): total_iq_elements is uint16_t; the product can wrap
	 * for large max_outstanding_io — confirm firmware limits keep it
	 * within 65535. */
	total_iq_elements = (softs->max_outstanding_io *
		(softs->max_ib_iu_length / softs->ibq_elem_size));

	/* Spread IB elements across the RAID IB queues, capped by the
	 * device per-queue limit. */
	softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
	softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
		softs->pqi_dev_cap.max_iq_elements);

	/* Same for OB queues. */
	softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
	softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
		softs->pqi_dev_cap.max_oq_elements);

	/* SG capacity: elements that fit in the spanned IU beyond the first
	 * element, plus the SGs embedded in the first IU. */
	softs->max_sg_per_iu = ((softs->max_ib_iu_length -
				softs->ibq_elem_size) /
				sizeof(sgt_t)) +
				MAX_EMBEDDED_SG_IN_FIRST_IU;

	DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
	DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
	DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
	DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);

	DBG_FUNC("OUT\n");
}
294 
295 /*
296  * Configure the operational queue parameters.
297  */
298 int
299 pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
300 {
301 	int ret = PQI_STATUS_SUCCESS;
302 
303 	/* Get the PQI capability,
304 		REPORT PQI DEVICE CAPABILITY request */
305 	ret = pqisrc_report_pqi_capability(softs);
306 	if (ret) {
307 		DBG_ERR("Failed to send report pqi dev capability request : %d\n",
308 				ret);
309 		goto err_out;
310 	}
311 
312 	/* Reserve required no of slots for internal requests */
313 	softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
314 
315 	/* Decide the Op queue configuration */
316 	pqisrc_decide_opq_config(softs);
317 
318 	DBG_FUNC("OUT\n");
319 	return ret;
320 
321 err_out:
322 	DBG_FUNC("OUT failed\n");
323 	return ret;
324 }
325 
326 /*
327  * Validate the PQI mode of adapter.
328  */
329 int
330 pqisrc_check_pqimode(pqisrc_softstate_t *softs)
331 {
332 	int ret = PQI_STATUS_FAILURE;
333 	int tmo = 0;
334 	uint64_t signature = 0;
335 
336 	DBG_FUNC("IN\n");
337 
338 	/* Check the PQI device signature */
339 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
340 	do {
341 		signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
342 
343 		if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
344 				sizeof(uint64_t)) == 0) {
345 			ret = PQI_STATUS_SUCCESS;
346 			break;
347 		}
348 		OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
349 	} while (tmo--);
350 
351 	PRINT_PQI_SIGNATURE(signature);
352 
353 	if (tmo <= 0) {
354 		DBG_ERR("PQI Signature is invalid\n");
355 		ret = PQI_STATUS_TIMEOUT;
356 		goto err_out;
357 	}
358 
359 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
360 	/* Check function and status code for the device */
361 	COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
362 		PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
363 	if (!tmo) {
364 		DBG_ERR("PQI device is not in IDLE state\n");
365 		ret = PQI_STATUS_TIMEOUT;
366 		goto err_out;
367 	}
368 
369 
370 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
371 	/* Check the PQI device status register */
372 	COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
373 				PQI_DEV_STATE_AT_INIT, tmo);
374 	if (!tmo) {
375 		DBG_ERR("PQI Registers are not ready\n");
376 		ret = PQI_STATUS_TIMEOUT;
377 		goto err_out;
378 	}
379 
380 	DBG_FUNC("OUT\n");
381 	return ret;
382 err_out:
383 	DBG_FUNC("OUT failed\n");
384 	return ret;
385 }
386 
387 /* PQI Feature processing */
388 static int
389 pqisrc_config_table_update(struct pqisrc_softstate *softs,
390 	uint16_t first_section, uint16_t last_section)
391 {
392 	pqi_vendor_general_request_t request;
393 	int ret = PQI_STATUS_FAILURE;
394 
395 	memset(&request, 0, sizeof(request));
396 
397 	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
398 	request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
399 	request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
400 	request.data.config_table_update.first_section = first_section;
401 	request.data.config_table_update.last_section = last_section;
402 
403 	ret = pqisrc_build_send_vendor_request(softs, &request, NULL);
404 
405 	if (ret != PQI_STATUS_SUCCESS) {
406 		DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
407 		return PQI_STATUS_FAILURE;
408 	}
409 
410 	return PQI_STATUS_SUCCESS;
411 }
412 
413 static inline
414 boolean_t pqi_is_firmware_feature_supported(
415 	struct pqi_conf_table_firmware_features *firmware_feature_list,
416 	unsigned int bit_position)
417 {
418 	unsigned int byte_index;
419 
420 	byte_index = bit_position / BITS_PER_BYTE;
421 
422 	if (byte_index >= firmware_feature_list->num_elements)
423 		return false;
424 
425 	return firmware_feature_list->features_supported[byte_index] &
426 		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
427 }
428 
429 static inline
430 boolean_t pqi_is_firmware_feature_enabled(
431 	struct pqi_conf_table_firmware_features *firmware_feature_list,
432 	uint8_t *firmware_features_addr, unsigned int bit_position)
433 {
434 	unsigned int byte_index;
435 	uint8_t *feature_enabled_addr;
436 
437 	byte_index = (bit_position / BITS_PER_BYTE) +
438 		(firmware_feature_list->num_elements * 2);
439 
440 	feature_enabled_addr = firmware_features_addr +
441 		offsetof(struct pqi_conf_table_firmware_features,
442 			features_supported) + byte_index;
443 
444 	return *feature_enabled_addr &
445 		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
446 }
447 
448 static inline void
449 pqi_request_firmware_feature(
450 	struct pqi_conf_table_firmware_features *firmware_feature_list,
451 	unsigned int bit_position)
452 {
453 	unsigned int byte_index;
454 
455 	byte_index = (bit_position / BITS_PER_BYTE) +
456 		firmware_feature_list->num_elements;
457 
458 	firmware_feature_list->features_supported[byte_index] |=
459 		(1 << (bit_position % BITS_PER_BYTE));
460 }
461 
462 /* Update PQI config table firmware features section and inform the firmware */
463 static int
464 pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs,
465 	struct pqi_conf_table_firmware_features *firmware_feature_list)
466 {
467 	uint8_t *request_feature_addr;
468 	void *request_feature_abs_addr;
469 
470 	request_feature_addr = firmware_feature_list->features_supported +
471 		firmware_feature_list->num_elements;
472 	request_feature_abs_addr = softs->fw_features_section_abs_addr +
473 		(request_feature_addr - (uint8_t*)firmware_feature_list);
474 
475 	os_io_memcpy(request_feature_abs_addr, request_feature_addr,
476 			firmware_feature_list->num_elements);
477 
478 	return pqisrc_config_table_update(softs,
479 		PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
480 		PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
481 }
482 
483 /* Check firmware has enabled the feature specified in the respective bit position. */
484 inline boolean_t
485 pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs,
486 		struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position)
487 {
488 	uint16_t byte_index;
489 	uint8_t *features_enabled_abs_addr;
490 
491 	byte_index = (bit_position / BITS_PER_BYTE) +
492 		(firmware_feature_list->num_elements * 2);
493 
494 	features_enabled_abs_addr = softs->fw_features_section_abs_addr +
495 	offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index;
496 
497 	return *features_enabled_abs_addr &
498 		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
499 }
500 
501 static void
502 pqi_firmware_feature_status(struct pqisrc_softstate	*softs,
503 	struct pqi_firmware_feature *firmware_feature)
504 {
505 	switch(firmware_feature->feature_bit) {
506 	case PQI_FIRMWARE_FEATURE_OFA:
507 		break;
508 	case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT:
509 		softs->timeout_in_passthrough = true;
510 		break;
511 	case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT:
512 		softs->timeout_in_tmf = true;
513 		break;
514 	default:
515 		DBG_NOTE("Nothing to do \n");
516 	}
517 }
518 
/* Firmware features supported by the driver */
static struct
pqi_firmware_feature pqi_firmware_features[] = {
	{
		/* Honor timeouts on RAID-path pass-through commands. */
		.feature_name = "Support timeout for pass-through commands",
		.feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		/* Honor timeouts on LUN-reset task management requests. */
		.feature_name = "Support timeout for LUN Reset TMF",
		.feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT,
		.feature_status = pqi_firmware_feature_status,
	}
};
533 
/*
 * Negotiate optional firmware features:
 *  1. request every feature supported by both driver and firmware,
 *  2. push the requested bytes into the config table,
 *  3. record which features firmware actually enabled and invoke each
 *     feature's status callback.
 */
static void
pqisrc_process_firmware_features(pqisrc_softstate_t *softs)
{
	int rc;
	struct pqi_conf_table_firmware_features *firmware_feature_list;
	unsigned int i;
	unsigned int num_features_requested;

	/* Firmware-features section, already mapped in BAR memory. */
	firmware_feature_list = (struct pqi_conf_table_firmware_features*)
		softs->fw_features_section_abs_addr;

	/* Check features and request those supported by firmware and driver.*/
	for (i = 0, num_features_requested = 0;
		i < ARRAY_SIZE(pqi_firmware_features); i++) {
		/* Firmware support it ? */
		if (pqi_is_firmware_feature_supported(firmware_feature_list,
				pqi_firmware_features[i].feature_bit)) {
			pqi_request_firmware_feature(firmware_feature_list,
				pqi_firmware_features[i].feature_bit);
			pqi_firmware_features[i].supported = true;
			num_features_requested++;
			DBG_NOTE("%s supported by driver, requesting firmware to enable it\n",
					pqi_firmware_features[i].feature_name);
		} else {
			DBG_NOTE("%s supported by driver, but not by current firmware\n",
					pqi_firmware_features[i].feature_name);
		}
	}
	/* Nothing in common with firmware: skip the table update. */
	if (num_features_requested == 0)
		return;

	/* Tell firmware to act on the requested-feature bytes. */
	rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list);
	if (rc) {
		DBG_ERR("Failed to update pqi config table\n");
		return;
	}

	/* Latch which requested features firmware actually enabled. */
	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (pqi_is_firmware_feature_enabled(firmware_feature_list,
			softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].enabled = true;
			DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name);
			if(pqi_firmware_features[i].feature_status)
				pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i]));
		}
	}
}
581 
582 /*
583  * Get the PQI configuration table parameters.
584  * Currently using for heart-beat counter scratch-pad register.
585  */
586 int
587 pqisrc_process_config_table(pqisrc_softstate_t *softs)
588 {
589 	int ret = PQI_STATUS_FAILURE;
590 	uint32_t config_table_size;
591 	uint32_t section_off;
592 	uint8_t *config_table_abs_addr __unused;
593 	struct pqi_conf_table *conf_table;
594 	struct pqi_conf_table_section_header *section_hdr;
595 
596 	config_table_size = softs->pqi_cap.conf_tab_sz;
597 
598 	if (config_table_size < sizeof(*conf_table) ||
599 		config_table_size > PQI_CONF_TABLE_MAX_LEN) {
600 		DBG_ERR("Invalid PQI conf table length of %u\n",
601 			config_table_size);
602 		return ret;
603 	}
604 
605 	conf_table = os_mem_alloc(softs, config_table_size);
606 	if (!conf_table) {
607 		DBG_ERR("Failed to allocate memory for PQI conf table\n");
608 		return ret;
609 	}
610 
611 	if (config_table_size < sizeof(conf_table) ||
612 		config_table_size > PQI_CONF_TABLE_MAX_LEN) {
613 		DBG_ERR("Invalid PQI conf table length of %u\n",
614 			config_table_size);
615 		goto out;
616 	}
617 
618 	config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
619 					softs->pqi_cap.conf_tab_off);
620 
621 	PCI_MEM_GET_BUF(softs, config_table_abs_addr,
622 			softs->pqi_cap.conf_tab_off,
623 			(uint8_t*)conf_table, config_table_size);
624 
625 
626 	if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
627 			sizeof(conf_table->sign)) != 0) {
628 		DBG_ERR("Invalid PQI config signature\n");
629 		goto out;
630 	}
631 
632 	section_off = LE_32(conf_table->first_section_off);
633 
634 	while (section_off) {
635 
636 		if (section_off+ sizeof(*section_hdr) >= config_table_size) {
637 			DBG_INFO("Reached end of PQI config table. Breaking off.\n");
638 			break;
639 		}
640 
641 		section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
642 
643 		switch (LE_16(section_hdr->section_id)) {
644 		case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
645 		case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
646 		case PQI_CONF_TABLE_SECTION_DEBUG:
647 			break;
648 		case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
649 			softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off;
650 			softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off;
651 			pqisrc_process_firmware_features(softs);
652 		break;
653 		case PQI_CONF_TABLE_SECTION_HEARTBEAT:
654 		softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
655 						section_off +
656 						offsetof(struct pqi_conf_table_heartbeat,
657 						heartbeat_counter);
658 		softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
659 							softs->heartbeat_counter_off);
660 		ret = PQI_STATUS_SUCCESS;
661 		break;
662 		default:
663 		DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
664 					LE_16(section_hdr->section_id));
665 		break;
666 		}
667 		section_off = LE_16(section_hdr->next_section_off);
668 	}
669 out:
670 	os_mem_free(softs, (void *)conf_table,config_table_size);
671 	return ret;
672 }
673 
674 /* Wait for PQI reset completion for the adapter*/
675 int
676 pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
677 {
678 	int ret = PQI_STATUS_SUCCESS;
679 	pqi_reset_reg_t reset_reg;
680 	int pqi_reset_timeout = 0;
681 	uint64_t val = 0;
682 	uint32_t max_timeout = 0;
683 
684 	val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
685 
686 	max_timeout = (val & 0xFFFF00000000) >> 32;
687 
688 	DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
689 
690 	while(1) {
691 		if (pqi_reset_timeout++ == max_timeout) {
692 			return PQI_STATUS_TIMEOUT;
693 		}
694 		OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
695 		reset_reg.all_bits = PCI_MEM_GET32(softs,
696 			&softs->pqi_reg->dev_reset, PQI_DEV_RESET);
697 		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
698 			break;
699 	}
700 
701 	return ret;
702 }
703 
/*
 * Function used to perform PQI hard reset.
 *
 * If the controller is currently in PQI mode: optionally quiesce it via
 * the SIS doorbell (when firmware allows), issue a PQI hard reset
 * through the device reset register, and wait for completion.  On
 * success the soft state no longer marks the controller as PQI mode.
 *
 * Returns PQI_STATUS_SUCCESS, or the error from the quiesce/completion
 * wait.
 */
int
pqi_reset(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	uint32_t val = 0;
	pqi_reset_reg_t pqi_reset_reg;

	DBG_FUNC("IN\n");

	if (true == softs->ctrl_in_pqi_mode) {

		if (softs->pqi_reset_quiesce_allowed) {
			/* Raise the PQI-reset-quiesce doorbell bit... */
			val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR);
			val |= SIS_PQI_RESET_QUIESCE;
			PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR, LE_32(val));
			/* ...and wait for firmware to acknowledge by clearing it. */
			ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
			if (ret) {
				DBG_ERR("failed with error %d during quiesce\n", ret);
				return ret;
			}
		}

		/* Request a hard reset via the PQI device reset register. */
		pqi_reset_reg.all_bits = 0;
		pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
		pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

		PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
			LE_32(pqi_reset_reg.all_bits));

		/* Poll until the device reports reset completion. */
		ret = pqisrc_wait_for_pqi_reset_completion(softs);
		if (ret) {
			DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
			return ret;
		}
	}
	softs->ctrl_in_pqi_mode = false;
	DBG_FUNC("OUT\n");
	return ret;
}
748 
/*
 * Initialize the adapter with supported PQI configuration.
 *
 * Sequence: verify PQI mode, gather CPU/interrupt configuration from
 * the OS, create the admin queue pair, allocate rcbs and the tag list,
 * size and create the operational queues.  On any failure, everything
 * set up so far is torn down via the goto-cleanup chain.
 *
 * Returns PQI_STATUS_SUCCESS or PQI_STATUS_FAILURE.
 */
int
pqisrc_pqi_init(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	/* Check the PQI signature */
	ret = pqisrc_check_pqimode(softs);
	if(ret) {
		DBG_ERR("failed to switch to pqi\n");
                goto err_out;
	}

	/* Record (in scratch registers) that the controller is in PQI mode. */
	PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
	softs->ctrl_in_pqi_mode = true;

	/* Get the No. of Online CPUs,NUMA/Processor config from OS */
	ret = os_get_processor_config(softs);
	if (ret) {
		DBG_ERR("Failed to get processor config from OS %d\n",
			ret);
		goto err_out;
	}

	softs->intr_type = INTR_TYPE_NONE;

	/* Get the interrupt count, type, priority available from OS */
	ret = os_get_intr_config(softs);
	if (ret) {
		DBG_ERR("Failed to get interrupt config from OS %d\n",
			ret);
		goto err_out;
	}

	/*Enable/Set Legacy INTx Interrupt mask clear pqi register,
	 *if allocated interrupt is legacy type.
	 */
	if (INTR_TYPE_FIXED == softs->intr_type) {
		pqisrc_configure_legacy_intx(softs, true);
		sis_enable_intx(softs);
	}

	/* Create Admin Queue pair*/
	ret = pqisrc_create_admin_queue(softs);
	if(ret) {
                DBG_ERR("Failed to configure admin queue\n");
                goto err_admin_queue;
    	}

	/* For creating event and IO operational queues we have to submit
	   admin IU requests.So Allocate resources for submitting IUs */

	/* Allocate the request container block (rcb) */
	ret = pqisrc_allocate_rcb(softs);
	if (ret == PQI_STATUS_FAILURE) {
                DBG_ERR("Failed to allocate rcb \n");
                goto err_rcb;
    	}

	/* Allocate & initialize request id queue */
	ret = pqisrc_init_taglist(softs,&softs->taglist,
				softs->max_outstanding_io);
	if (ret) {
		DBG_ERR("Failed to allocate memory for request id q : %d\n",
			ret);
		goto err_taglist;
	}

	/* Size the operational queues from the reported capabilities. */
	ret = pqisrc_configure_op_queues(softs);
	if (ret) {
			DBG_ERR("Failed to configure op queue\n");
			goto err_config_opq;
	}

	/* Create Operational queues */
	ret = pqisrc_create_op_queues(softs);
	if(ret) {
                DBG_ERR("Failed to create op queue\n");
                ret = PQI_STATUS_FAILURE;
                goto err_create_opq;
        }

	softs->ctrl_online = true;

	DBG_FUNC("OUT\n");
	return ret;

	/* Unwind in reverse order of construction. */
err_create_opq:
err_config_opq:
	pqisrc_destroy_taglist(softs,&softs->taglist);
err_taglist:
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
err_rcb:
	pqisrc_destroy_admin_queue(softs);
err_admin_queue:
	os_free_intr_config(softs);
err_out:
	DBG_FUNC("OUT failed\n");
	return PQI_STATUS_FAILURE;
}
853 
854 int
855 pqisrc_force_sis(pqisrc_softstate_t *softs)
856 {
857 	int ret = PQI_STATUS_SUCCESS;
858 
859 	if (SIS_IS_KERNEL_PANIC(softs)) {
860 		DBG_INIT("Controller FW is not running");
861 		return PQI_STATUS_FAILURE;
862 	}
863 
864 	if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
865 		return ret;
866 	}
867 
868 	if (SIS_IS_KERNEL_UP(softs)) {
869 		PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
870 		return ret;
871 	}
872 	/* Disable interrupts ? */
873 	sis_disable_interrupt(softs);
874 
875 	/* reset pqi, this will delete queues */
876 	ret = pqi_reset(softs);
877 	if (ret) {
878 		return ret;
879 	}
880 	/* Re enable SIS */
881 	ret = pqisrc_reenable_sis(softs);
882 	if (ret) {
883 		return ret;
884 	}
885 
886 	PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
887 
888 	return ret;
889 }
890 
891 static int
892 pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
893 {
894 	int count = 0;
895 	int ret = PQI_STATUS_SUCCESS;
896 
897 	DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);
898 
899 	if (softs->taglist.num_elem == softs->max_outstanding_io)
900 		return ret;
901 	else {
902 		DBG_WARN("%d commands pending\n",
903 		softs->max_outstanding_io - softs->taglist.num_elem);
904 
905 		while(1) {
906 
907 			/* Since heartbeat timer stopped ,check for firmware status*/
908 			if (SIS_IS_KERNEL_PANIC(softs)) {
909 				DBG_ERR("Controller FW is not running\n");
910 				return PQI_STATUS_FAILURE;
911 			}
912 
913 			if (softs->taglist.num_elem != softs->max_outstanding_io) {
914 				/* Sleep for 1 msec */
915 				OS_SLEEP(1000);
916 				count++;
917 				if(count % 1000 == 0) {
918 					DBG_WARN("Waited for %d seconds", count/1000);
919 				}
920 				if (count >= PQI_QUIESCE_TIMEOUT) {
921 					return PQI_STATUS_FAILURE;
922 				}
923 				continue;
924 			}
925 			break;
926 		}
927 	}
928 	return ret;
929 }
930 
931 static void
932 pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
933 {
934 
935 	int tag = 0;
936 	rcb_t *rcb;
937 
938 	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
939 		rcb = &softs->rcb[tag];
940 		if(rcb->req_pending && is_internal_req(rcb)) {
941 			rcb->status = REQUEST_FAILED;
942 			rcb->req_pending = false;
943 		}
944 	}
945 }
946 
947 
/*
 * Uninitialize the resources used during PQI initialization.
 * Tears down in roughly the reverse order of pqisrc_pqi_init() and
 * finally switches the controller back to SIS mode.
 */
void
pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
	int i, ret;

	DBG_FUNC("IN\n");

	/* Wait for any rescan to finish */
	pqisrc_wait_for_rescan_complete(softs);

	/* Wait for commands to complete */
	ret = pqisrc_wait_for_cmnd_complete(softs);

	/* disable and free the interrupt resources */
	os_destroy_intr(softs);

	/* Complete all pending commands. */
	/* Quiesce failed or timed out: fail internal requests and bounce
	 * the remaining OS commands so nothing is left pending. */
	if(ret != PQI_STATUS_SUCCESS) {
		pqisrc_complete_internal_cmds(softs);
		os_complete_outstanding_cmds_nodevice(softs);
	}

	if(softs->devlist_lockcreated==true){
		os_uninit_spinlock(&softs->devlist_lock);
		softs->devlist_lockcreated = false;
	}

	/* Destroy the per-queue locks for both RAID and AIO IB queues. */
	for (i = 0; i <  softs->num_op_raid_ibq; i++) {
		/* OP RAID IB Q */
		if(softs->op_raid_ib_q[i].lockcreated==true){
			OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
			softs->op_raid_ib_q[i].lockcreated = false;
		}
		/* OP AIO IB Q */
		if(softs->op_aio_ib_q[i].lockcreated==true){
			OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
			softs->op_aio_ib_q[i].lockcreated = false;
		}
	}

	/* Free Op queues */
	os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
	os_dma_mem_free(softs, &softs->op_obq_dma_mem);
	os_dma_mem_free(softs, &softs->event_q_dma_mem);



	/* Free  rcb */
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);

	/* Free request id lists */
	pqisrc_destroy_taglist(softs,&softs->taglist);

	if(softs->admin_ib_queue.lockcreated==true) {
		OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
		softs->admin_ib_queue.lockcreated = false;
	}

	/* Free Admin Queue */
	os_dma_mem_free(softs, &softs->admin_queue_dma_mem);

	/* Switch back to SIS mode */
	if (pqisrc_force_sis(softs)) {
		DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
	}

	DBG_FUNC("OUT\n");
}
1019 
1020 /*
1021  * Function to initialize the adapter settings.
1022  */
1023 int
1024 pqisrc_init(pqisrc_softstate_t *softs)
1025 {
1026 	int ret = 0;
1027 	int i = 0, j = 0;
1028 
1029 	DBG_FUNC("IN\n");
1030 
1031 	check_struct_sizes();
1032 
1033 	/* Init the Sync interface */
1034 	ret = pqisrc_sis_init(softs);
1035 	if (ret) {
1036 		DBG_ERR("SIS Init failed with error %d\n", ret);
1037 		goto err_out;
1038 	}
1039 
1040 	ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
1041 	if(ret != PQI_STATUS_SUCCESS){
1042 		DBG_ERR(" Failed to initialize scan lock\n");
1043 		goto err_scan_lock;
1044 	}
1045 
1046 	/* Init the PQI interface */
1047 	ret = pqisrc_pqi_init(softs);
1048 	if (ret) {
1049 		DBG_ERR("PQI Init failed with error %d\n", ret);
1050 		goto err_pqi;
1051 	}
1052 
1053 	/* Setup interrupt */
1054 	ret = os_setup_intr(softs);
1055 	if (ret) {
1056 		DBG_ERR("Interrupt setup failed with error %d\n", ret);
1057 		goto err_intr;
1058 	}
1059 
1060 	/* Report event configuration */
1061         ret = pqisrc_report_event_config(softs);
1062         if(ret){
1063                 DBG_ERR(" Failed to configure Report events\n");
1064 		goto err_event;
1065 	}
1066 
1067 	/* Set event configuration*/
1068         ret = pqisrc_set_event_config(softs);
1069         if(ret){
1070                 DBG_ERR(" Failed to configure Set events\n");
1071                 goto err_event;
1072         }
1073 
1074 	/* Check for For PQI spanning */
1075 	ret = pqisrc_get_ctrl_fw_version(softs);
1076         if(ret){
1077                 DBG_ERR(" Failed to get ctrl fw version\n");
1078 		goto err_fw_version;
1079         }
1080 
1081 	/* update driver version in to FW */
1082 	ret = pqisrc_write_driver_version_to_host_wellness(softs);
1083 	if (ret) {
1084 		DBG_ERR(" Failed to update driver version in to FW");
1085 		goto err_host_wellness;
1086 	}
1087 
1088 
1089 	os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
1090 	ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
1091 	if(ret){
1092 		DBG_ERR(" Failed to initialize devlist_lock\n");
1093 		softs->devlist_lockcreated=false;
1094 		goto err_lock;
1095 	}
1096 	softs->devlist_lockcreated = true;
1097 
1098 	/* Get the PQI configuration table to read heart-beat counter*/
1099 	ret = pqisrc_process_config_table(softs);
1100 	if (ret) {
1101 		DBG_ERR("Failed to process PQI configuration table %d\n", ret);
1102 		goto err_config_tab;
1103 	}
1104 
1105 	softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
1106 
1107 	/* Init device list */
1108 	for(i = 0; i < PQI_MAX_DEVICES; i++)
1109 		for(j = 0; j < PQI_MAX_MULTILUN; j++)
1110 			softs->device_list[i][j] = NULL;
1111 
1112 	pqisrc_init_targetid_pool(softs);
1113 
1114 	DBG_FUNC("OUT\n");
1115 	return ret;
1116 
1117 err_config_tab:
1118 	if(softs->devlist_lockcreated==true){
1119 		os_uninit_spinlock(&softs->devlist_lock);
1120 		softs->devlist_lockcreated = false;
1121 	}
1122 err_lock:
1123 err_fw_version:
1124 err_event:
1125 err_host_wellness:
1126 err_intr:
1127 	pqisrc_pqi_uninit(softs);
1128 err_pqi:
1129 	os_destroy_semaphore(&softs->scan_lock);
1130 err_scan_lock:
1131 	pqisrc_sis_uninit(softs);
1132 err_out:
1133 	DBG_FUNC("OUT failed\n");
1134 	return ret;
1135 }
1136 
1137 /*
1138  * Write all data in the adapter's battery-backed cache to
1139  * storage.
1140  */
1141 int
1142 pqisrc_flush_cache( pqisrc_softstate_t *softs,
1143 			enum pqisrc_flush_cache_event_type event_type)
1144 {
1145 	int rval = PQI_STATUS_SUCCESS;
1146 	pqisrc_raid_req_t request;
1147 	pqisrc_bmic_flush_cache_t *flush_buff = NULL;
1148 
1149 	DBG_FUNC("IN\n");
1150 
1151 	if (pqisrc_ctrl_offline(softs))
1152 		return PQI_STATUS_FAILURE;
1153 
1154 	flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
1155 	if (!flush_buff) {
1156 		DBG_ERR("Failed to allocate memory for flush cache params\n");
1157 		rval = PQI_STATUS_FAILURE;
1158 		return rval;
1159 	}
1160 
1161 	flush_buff->halt_event = event_type;
1162 
1163 	memset(&request, 0, sizeof(request));
1164 
1165 	rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
1166 			sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
1167 			(uint8_t *)RAID_CTLR_LUNID, NULL);
1168 	if (rval) {
1169 		DBG_ERR("error in build send raid req ret=%d\n", rval);
1170 	}
1171 
1172 	if (flush_buff)
1173 		os_mem_free(softs, (void *)flush_buff,
1174 			sizeof(pqisrc_bmic_flush_cache_t));
1175 
1176 	DBG_FUNC("OUT\n");
1177 
1178 	return rval;
1179 }
1180 
1181 /*
1182  * Uninitialize the adapter.
1183  */
void
pqisrc_uninit(pqisrc_softstate_t *softs)
{
	DBG_FUNC("IN\n");

	/* Tear down the PQI interface first (queues, locks, SIS fallback). */
	pqisrc_pqi_uninit(softs);

	/* Then release the SIS-level resources. */
	pqisrc_sis_uninit(softs);

	os_destroy_semaphore(&softs->scan_lock);

	/* Finally drop all cached device-list entries. */
	pqisrc_cleanup_devices(softs);

	DBG_FUNC("OUT\n");
}
1199