1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_init.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_isr.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t);
61 static int ql_nvram_24xx_config(ql_adapter_state_t *);
62 static void ql_23_properties(ql_adapter_state_t *, nvram_t *);
63 static void ql_24xx_properties(ql_adapter_state_t *, nvram_24xx_t *);
64 static int ql_check_isp_firmware(ql_adapter_state_t *);
65 static int ql_chip_diag(ql_adapter_state_t *);
66 static int ql_load_flash_fw(ql_adapter_state_t *);
67 static int ql_configure_loop(ql_adapter_state_t *);
68 static int ql_configure_hba(ql_adapter_state_t *);
69 static int ql_configure_fabric(ql_adapter_state_t *);
70 static int ql_configure_device_d_id(ql_adapter_state_t *);
71 static void ql_set_max_read_req(ql_adapter_state_t *);
72 static void ql_configure_n_port_info(ql_adapter_state_t *);
73 static void ql_clear_mcp(ql_adapter_state_t *);
74 static void ql_mps_reset(ql_adapter_state_t *);
75 
76 /*
77  * ql_initialize_adapter
78  *	Initialize board.
79  *
80  * Input:
81  *	ha = adapter state pointer.
82  *
83  * Returns:
84  *	ql local function return status code.
85  *
86  * Context:
87  *	Kernel context.
88  */
89 int
90 ql_initialize_adapter(ql_adapter_state_t *ha)
91 {
92 	int			rval;
93 	class_svc_param_t	*class3_param;
94 	caddr_t			msg;
95 	la_els_logi_t		*els = &ha->loginparams;
96 	int			retries = 5;
97 
98 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
99 
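	/*
	 * The entire bring-up sequence below is retried (up to 5 times)
	 * while ISP_ABORT_NEEDED remains set, so a failed PCI setup,
	 * chip diagnostic or firmware load gets another attempt before
	 * the adapter is left offline.
	 */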
100 	do {
101 		/* Clear adapter flags. */
102 		TASK_DAEMON_LOCK(ha);
103 		ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG |
104 		    TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG |
105 		    TASK_DAEMON_IDLE_CHK_FLG;
106 		ha->task_daemon_flags |= LOOP_DOWN;
107 		TASK_DAEMON_UNLOCK(ha);
108 
109 		ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
110 		ADAPTER_STATE_LOCK(ha);
111 		ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
112 		ha->flags &= ~ONLINE;
113 		ADAPTER_STATE_UNLOCK(ha);
114 
115 		ha->state = FC_STATE_OFFLINE;
116 		msg = "Loop OFFLINE";
117 
118 		rval = ql_pci_sbus_config(ha);
119 		if (rval != QL_SUCCESS) {
120 			TASK_DAEMON_LOCK(ha);
121 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
122 				EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n");
123 				ha->task_daemon_flags |= ISP_ABORT_NEEDED;
124 			}
125 			TASK_DAEMON_UNLOCK(ha);
126 			continue;
127 		}
128 
129 		(void) ql_setup_fcache(ha);
130 
131 		/* Reset ISP chip. */
132 		ql_reset_chip(ha);
133 
134 		/* Get NVRAM configuration if needed. */
135 		if (ha->init_ctrl_blk.cb.version == 0) {
136 			(void) ql_nvram_config(ha);
137 		}
138 
139 		/* Set login parameters. */
140 		if (CFG_IST(ha, CFG_CTRL_242581)) {
141 			els->common_service.rx_bufsize = CHAR_TO_SHORT(
142 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
143 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
144 			bcopy((void *)&ha->init_ctrl_blk.cb24.port_name[0],
145 			    (void *)&els->nport_ww_name.raw_wwn[0], 8);
146 			bcopy((void *)&ha->init_ctrl_blk.cb24.node_name[0],
147 			    (void *)&els->node_ww_name.raw_wwn[0], 8);
148 		} else {
149 			els->common_service.rx_bufsize = CHAR_TO_SHORT(
150 			    ha->init_ctrl_blk.cb.max_frame_length[0],
151 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
152 			bcopy((void *)&ha->init_ctrl_blk.cb.port_name[0],
153 			    (void *)&els->nport_ww_name.raw_wwn[0], 8);
154 			bcopy((void *)&ha->init_ctrl_blk.cb.node_name[0],
155 			    (void *)&els->node_ww_name.raw_wwn[0], 8);
156 		}
157 
158 		/* Determine which RISC code to use. */
159 		(void) ql_check_isp_firmware(ha);
160 
161 		rval = ql_chip_diag(ha);
162 		if (rval == QL_SUCCESS) {
163 			rval = ql_load_isp_firmware(ha);
164 		}
165 
166 		if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
167 		    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) {
168 
169 			(void) ql_fw_ready(ha, ha->fwwait);
170 
171 			if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
172 			    ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
173 				if (ha->topology & QL_LOOP_CONNECTION) {
174 					ha->state = ha->state | FC_STATE_LOOP;
175 					msg = "Loop ONLINE";
176 					ha->task_daemon_flags |= STATE_ONLINE;
177 				} else if (ha->topology & QL_P2P_CONNECTION) {
178 					ha->state = ha->state |
179 					    FC_STATE_ONLINE;
180 					msg = "Link ONLINE";
181 					ha->task_daemon_flags |= STATE_ONLINE;
182 				} else {
183 					msg = "Unknown Link state";
184 				}
185 			}
186 		} else {
187 			TASK_DAEMON_LOCK(ha);
188 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
189 				EL(ha, "failed, isp_abort_needed\n");
190 				ha->task_daemon_flags |= ISP_ABORT_NEEDED |
191 				    LOOP_DOWN;
192 			}
193 			TASK_DAEMON_UNLOCK(ha);
194 		}
195 
196 	} while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED);
197 
198 	cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg);
199 
200 	/* Enable ISP interrupts. */
201 	CFG_IST(ha, CFG_CTRL_242581) ? WRT32_IO_REG(ha, ictrl, ISP_EN_RISC):
202 	    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
203 
204 	ADAPTER_STATE_LOCK(ha);
205 	ha->flags |= (INTERRUPTS_ENABLED | ONLINE);
206 	ADAPTER_STATE_UNLOCK(ha);
207 
208 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | RESET_MARKER_NEEDED |
209 	    COMMAND_WAIT_NEEDED);
210 
211 	/*
212 	 * Setup login parameters.
213 	 */
214 	els->common_service.fcph_version = 0x2006;
215 	els->common_service.btob_credit = 3;
216 	els->common_service.cmn_features = 0x8800;
217 	els->common_service.conc_sequences = 0xff;
218 	els->common_service.relative_offset = 3;
219 	els->common_service.e_d_tov = 0x07d0;
220 
221 	class3_param = (class_svc_param_t *)&els->class_3;
222 	class3_param->class_valid_svc_opt = 0x8800;
223 	class3_param->rcv_data_size = els->common_service.rx_bufsize;
224 	class3_param->conc_sequences = 0xff;
225 
226 	if (rval != QL_SUCCESS) {
227 		EL(ha, "failed, rval = %xh\n", rval);
228 	} else {
229 		/*EMPTY*/
230 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
231 	}
232 	return (rval);
233 }
234 
235 /*
236  * ql_pci_sbus_config
237  *	Setup device PCI/SBUS configuration registers.
238  *
239  * Input:
240  *	ha = adapter state pointer.
241  *
242  * Returns:
243  *	ql local function return status code.
244  *
245  * Context:
246  *	Kernel context.
247  */
248 int
249 ql_pci_sbus_config(ql_adapter_state_t *ha)
250 {
251 	uint32_t	timer;
252 	uint16_t	cmd, w16;
253 
254 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
255 
256 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
257 		w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
258 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION));
259 		EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4,
260 		    w16 & 0xf);
261 	} else {
262 		/*
263 		 * We want to respect the framework's setting of the PCI
264 		 * configuration space command register and also make sure
265 		 * that all bits of interest to us are properly set in the
266 		 * command register.
267 		 */
268 		cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
269 		cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE |
270 		    PCI_COMM_ME | PCI_COMM_MEMWR_INVAL |
271 		    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
272 
273 		/*
274 		 * If this is a 2300 card and not 2312, reset the
275 		 * MEMWR_INVAL due to a bug in the 2300. Unfortunately, the
276 		 * 2310 also reports itself as a 2300 so we need to get the
277 		 * fb revision level -- a 6 indicates it really is a 2300 and
278 		 * not a 2310.
279 		 */
280 
281 		if (ha->device_id == 0x2300) {
282 			/* Pause RISC. */
283 			WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
284 			for (timer = 0; timer < 30000; timer++) {
285 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) !=
286 				    0) {
287 					break;
288 				} else {
289 					drv_usecwait(MILLISEC);
290 				}
291 			}
292 
293 			/* Select FPM registers. */
294 			WRT16_IO_REG(ha, ctrl_status, 0x20);
295 
296 			/* Get the fb rev level */
297 			if (RD16_IO_REG(ha, fb_cmd) == 6) {
298 				cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL);
299 			}
300 
301 			/* Deselect FPM registers. */
302 			WRT16_IO_REG(ha, ctrl_status, 0x0);
303 
304 			/* Release RISC module. */
305 			WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
306 			for (timer = 0; timer < 30000; timer++) {
307 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) ==
308 				    0) {
309 					break;
310 				} else {
311 					drv_usecwait(MILLISEC);
312 				}
313 			}
314 		} else if (ha->device_id == 0x2312) {
315 			/*
316 			 * cPCI ISP2312 specific code to service function 1
317 			 * hot-swap registers.
318 			 */
319 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK)
320 			    != 0) {
321 				ql_pci_config_put8(ha, 0x66, 0xc2);
322 			}
323 		}
324 
325 		/* max memory read byte cnt override */
326 		if (ha->pci_max_read_req != 0) {
327 			ql_set_max_read_req(ha);
328 		}
329 
330 		ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
331 
332 		/* Set cache line register. */
333 		ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10);
334 
335 		/* Set latency register. */
336 		ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40);
337 
338 		/* Reset expansion ROM address decode enable. */
339 		w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM);
340 		w16 = (uint16_t)(w16 & ~BIT_0);
341 		ql_pci_config_put16(ha, PCI_CONF_ROM, w16);
342 	}
343 
344 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
345 
346 	return (QL_SUCCESS);
347 }
348 
349 /*
350  * Set the PCI max read request value.
351  *
352  * Input:
353  *	ha:		adapter state pointer.
354  *
355  * Output:
356  *	none.
357  *
360  * Context:
361  *	Kernel context.
362  */
363 
364 static void
365 ql_set_max_read_req(ql_adapter_state_t *ha)
366 {
367 	uint16_t	read_req, w16;
368 	uint16_t	tmp = ha->pci_max_read_req;
369 
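	/*
	 * Convert the byte-count override into the encoded register field:
	 * shift away the fixed low-order bits, then count how many further
	 * shifts reduce the remainder to zero (e.g. 2048 >> 10 == 2, and
	 * two more shifts give an encoding of 2).  The result is written
	 * to what are likely the PCI-X (0x4e) and PCIe device control
	 * (0x54) maximum read request fields of the respective chips.
	 */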
370 	if ((ha->device_id == 0x2422) ||
371 	    ((ha->device_id & 0xff00) == 0x2300)) {
372 		/* check for valid override value */
373 		if (tmp == 512 || tmp == 1024 || tmp == 2048 ||
374 		    tmp == 4096) {
375 			/* shift away the don't cares */
376 			tmp = (uint16_t)(tmp >> 10);
377 			/* convert bit pos to request value */
378 			for (read_req = 0; tmp != 0; read_req++) {
379 				tmp = (uint16_t)(tmp >> 1);
380 			}
381 			w16 = (uint16_t)ql_pci_config_get16(ha, 0x4e);
382 			w16 = (uint16_t)(w16 & ~(BIT_3 | BIT_2));
383 			w16 = (uint16_t)(w16 | (read_req << 2));
384 			ql_pci_config_put16(ha, 0x4e, w16);
385 		} else {
386 			EL(ha, "invalid parameter value for "
387 			    "'pci-max-read-request': %d; using system "
388 			    "default\n", tmp);
389 		}
390 	} else if ((ha->device_id == 0x2432) || ((ha->device_id & 0xff00) ==
391 	    0x2500) || (ha->device_id == 0x8432)) {
392 		/* check for valid override value */
393 		if (tmp == 128 || tmp == 256 || tmp == 512 ||
394 		    tmp == 1024 || tmp == 2048 || tmp == 4096) {
395 			/* shift away the don't cares */
396 			tmp = (uint16_t)(tmp >> 8);
397 			/* convert bit pos to request value */
398 			for (read_req = 0; tmp != 0; read_req++) {
399 				tmp = (uint16_t)(tmp >> 1);
400 			}
401 			w16 = (uint16_t)ql_pci_config_get16(ha, 0x54);
402 			w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 |
403 			    BIT_12));
404 			w16 = (uint16_t)(w16 | (read_req << 12));
405 			ql_pci_config_put16(ha, 0x54, w16);
406 		} else {
407 			EL(ha, "invalid parameter value for "
408 			    "'pci-max-read-request': %d; using system "
409 			    "default\n", tmp);
410 		}
411 	}
412 }
413 
414 /*
415  * NVRAM configuration.
416  *
417  * Input:
418  *	ha:		adapter state pointer.
419  *	ha->hba_buf = request and response rings
420  *
421  * Output:
422  *	ha->init_ctrl_blk = initialization control block
423  *	host adapters parameters in host adapter block
424  *
425  * Returns:
426  *	ql local function return status code.
427  *
428  * Context:
429  *	Kernel context.
430  */
431 int
432 ql_nvram_config(ql_adapter_state_t *ha)
433 {
434 	uint32_t	cnt;
435 	caddr_t		dptr1, dptr2;
436 	ql_init_cb_t	*icb = &ha->init_ctrl_blk.cb;
437 	ql_ip_init_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb;
438 	nvram_t		*nv = (nvram_t *)ha->request_ring_bp;
439 	uint16_t	*wptr = (uint16_t *)ha->request_ring_bp;
440 	uint8_t		chksum = 0;
441 	int		rval;
442 	int		idpromlen;
443 	char		idprombuf[32];
444 	uint32_t	start_addr;
445 
446 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
447 
448 	if (CFG_IST(ha, CFG_CTRL_242581)) {
449 		return (ql_nvram_24xx_config(ha));
450 	}
451 
452 	start_addr = 0;
453 	if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) ==
454 	    QL_SUCCESS) {
455 		/* Verify valid NVRAM checksum. */
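		/*
		 * The request ring buffer doubles as scratch space for the
		 * raw NVRAM image here; the 8-bit sum of all of its bytes
		 * must come out to zero for the contents to be trusted.
		 */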
456 		for (cnt = 0; cnt < sizeof (nvram_t)/2; cnt++) {
457 			*wptr = (uint16_t)ql_get_nvram_word(ha,
458 			    (uint32_t)(cnt + start_addr));
459 			chksum = (uint8_t)(chksum + (uint8_t)*wptr);
460 			chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8));
461 			wptr++;
462 		}
463 		ql_release_nvram(ha);
464 	}
465 
466 	/* Bad NVRAM data, set default parameters. */
467 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
468 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
469 	    nv->nvram_version < 1) {
470 
471 		EL(ha, "failed, rval=%xh, checksum=%xh, "
472 		    "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, "
473 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
474 		    nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size,
475 		    ha->subven_id, nv->nvram_version);
476 
477 		/* Don't print nvram message if it's an on-board 2200 */
478 		if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
479 		    (ha->xioctl->fdesc.flash_size == 0))) {
480 			cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed,"
481 			    " using driver defaults.", QL_NAME, ha->instance);
482 		}
483 
484 		/* Reset NVRAM data. */
485 		bzero((void *)nv, sizeof (nvram_t));
486 
487 		/*
488 		 * Set default initialization control block.
489 		 */
490 		nv->parameter_block_version = ICB_VERSION;
491 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
492 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
493 
494 		nv->max_frame_length[1] = 4;
495 
496 		/*
497 		 * Allow 2048 byte frames for 2300
498 		 */
499 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
500 			nv->max_frame_length[1] = 8;
501 		}
502 		nv->max_iocb_allocation[1] = 1;
503 		nv->execution_throttle[0] = 16;
504 		nv->login_retry_count = 8;
505 
506 		idpromlen = 32;
507 
508 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
509 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
510 		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
511 		    &idpromlen) != DDI_PROP_SUCCESS) {
512 
513 			QL_PRINT_3(CE_CONT, "(%d): Unable to read idprom "
514 			    "property\n", ha->instance);
515 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
516 			    "property", QL_NAME, ha->instance);
517 
518 			nv->port_name[2] = 33;
519 			nv->port_name[3] = 224;
520 			nv->port_name[4] = 139;
521 			nv->port_name[7] = (uint8_t)
522 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
523 		} else {
524 
525 			nv->port_name[2] = idprombuf[2];
526 			nv->port_name[3] = idprombuf[3];
527 			nv->port_name[4] = idprombuf[4];
528 			nv->port_name[5] = idprombuf[5];
529 			nv->port_name[6] = idprombuf[6];
530 			nv->port_name[7] = idprombuf[7];
531 			nv->port_name[0] = (uint8_t)
532 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
533 		}
534 
535 		/* Don't print nvram message if it's an on-board 2200 */
536 		if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
537 		    (ha->xioctl->fdesc.flash_size == 0))) {
538 			cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using"
539 			    " default HBA parameters and temporary WWPN:"
540 			    " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
541 			    ha->instance, nv->port_name[0], nv->port_name[1],
542 			    nv->port_name[2], nv->port_name[3],
543 			    nv->port_name[4], nv->port_name[5],
544 			    nv->port_name[6], nv->port_name[7]);
545 		}
546 
547 		nv->login_timeout = 4;
548 
549 		/* Set default connection options for the 23xx to 2 */
550 		if (!(CFG_IST(ha, CFG_CTRL_2200))) {
551 			nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
552 			    BIT_5);
553 		}
554 
555 		/*
556 		 * Set default host adapter parameters
557 		 */
558 		nv->host_p[0] = BIT_1;
559 		nv->host_p[1] = BIT_2;
560 		nv->reset_delay = 5;
561 		nv->port_down_retry_count = 8;
562 		nv->maximum_luns_per_target[0] = 8;
563 
564 		rval = QL_FUNCTION_FAILED;
565 	}
566 
567 	/* Check for adapter node name (big endian). */
568 	for (cnt = 0; cnt < 8; cnt++) {
569 		if (nv->node_name[cnt] != 0) {
570 			break;
571 		}
572 	}
573 
574 	/* Copy port name if no node name (big endian). */
575 	if (cnt == 8) {
576 		bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
577 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
578 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
579 	}
580 
581 	/* Reset initialization control blocks. */
582 	bzero((void *)icb, sizeof (ql_init_cb_t));
583 
584 	/* Get driver properties. */
585 	ql_23_properties(ha, nv);
586 
587 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
588 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
589 	    QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
590 	    nv->port_name[2], nv->port_name[3], nv->port_name[4],
591 	    nv->port_name[5], nv->port_name[6], nv->port_name[7],
592 	    nv->node_name[0], nv->node_name[1], nv->node_name[2],
593 	    nv->node_name[3], nv->node_name[4], nv->node_name[5],
594 	    nv->node_name[6], nv->node_name[7]);
595 
596 	/*
597 	 * Copy over NVRAM RISC parameter block
598 	 * to initialization control block.
599 	 */
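	/*
	 * The copy is done in two pieces because the queue pointer,
	 * length and address fields in the middle of the ICB have no
	 * NVRAM counterpart; the driver fills those in from its DMA
	 * handles further below.
	 */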
600 	dptr1 = (caddr_t)icb;
601 	dptr2 = (caddr_t)&nv->parameter_block_version;
602 	cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] -
603 	    (uintptr_t)&icb->version);
604 	while (cnt-- != 0) {
605 		*dptr1++ = *dptr2++;
606 	}
607 
608 	/* Copy 2nd half. */
609 	dptr1 = (caddr_t)&icb->add_fw_opt[0];
610 	cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] -
611 	    (uintptr_t)&icb->add_fw_opt[0]);
612 
613 	while (cnt-- != 0) {
614 		*dptr1++ = *dptr2++;
615 	}
616 
617 	/*
618 	 * Setup driver firmware options.
619 	 */
620 	icb->firmware_options[0] = (uint8_t)
621 	    (icb->firmware_options[0] | BIT_6 | BIT_1);
622 
623 	/*
624 	 * There is no use enabling fast post for SBUS or 2300 cards.
625 	 * Always enable 64-bit addressing, except on SBUS cards.
626 	 */
627 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
628 	if (CFG_IST(ha, (CFG_SBUS_CARD | CFG_CTRL_2300 | CFG_CTRL_6322))) {
629 		icb->firmware_options[0] = (uint8_t)
630 		    (icb->firmware_options[0] & ~BIT_3);
631 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
632 			icb->special_options[0] = (uint8_t)
633 			    (icb->special_options[0] | BIT_5);
634 			ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING;
635 		}
636 	} else {
637 		icb->firmware_options[0] = (uint8_t)
638 		    (icb->firmware_options[0] | BIT_3);
639 	}
640 	/* RIO and ZIO not supported. */
641 	icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
642 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
643 
644 	icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] |
645 	    BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0);
646 	icb->firmware_options[0] = (uint8_t)
647 	    (icb->firmware_options[0] & ~(BIT_5 | BIT_4));
648 	icb->firmware_options[1] = (uint8_t)
649 	    (icb->firmware_options[1] & ~BIT_4);
650 
651 	icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4));
652 	icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1);
653 
654 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
655 		if ((icb->special_options[1] & 0x20) == 0) {
656 			EL(ha, "50 ohm is not set\n");
657 		}
658 	}
659 	icb->execution_throttle[0] = 0xff;
660 	icb->execution_throttle[1] = 0xff;
661 
662 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
663 		icb->firmware_options[1] = (uint8_t)
664 		    (icb->firmware_options[1] | BIT_7 | BIT_6);
665 		icb->add_fw_opt[1] = (uint8_t)
666 		    (icb->add_fw_opt[1] | BIT_5 | BIT_4);
667 	}
668 
669 	/*
670 	 * Set host adapter parameters
671 	 */
672 	ADAPTER_STATE_LOCK(ha);
673 	ha->nvram_version = nv->nvram_version;
674 	ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0],
675 	    nv->adapter_features[1]);
676 
677 	nv->host_p[0] & BIT_4 ? (ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD) :
678 	    (ha->cfg_flags &= ~CFG_DISABLE_RISC_CODE_LOAD);
679 	nv->host_p[0] & BIT_5 ? (ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1) :
680 	    (ha->cfg_flags &= ~CFG_SET_CACHE_LINE_SIZE_1);
681 
682 	nv->host_p[1] & BIT_1 ? (ha->cfg_flags |= CFG_ENABLE_LIP_RESET) :
683 	    (ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET);
684 	nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
685 	    (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
686 	nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
687 	    (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
688 
689 	nv->adapter_features[0] & BIT_3 ?
690 	    (ha->cfg_flags |= CFG_MULTI_CHIP_ADAPTER) :
691 	    (ha->cfg_flags &= ~CFG_MULTI_CHIP_ADAPTER);
692 
693 	ADAPTER_STATE_UNLOCK(ha);
694 
695 	ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
696 	    nv->execution_throttle[1]);
697 	ha->loop_reset_delay = nv->reset_delay;
698 	ha->port_down_retry_count = nv->port_down_retry_count;
699 	ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ?
700 	    R_A_TOV_DEFAULT : icb->login_timeout);
701 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
702 	    nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]);
703 	if (ha->maximum_luns_per_target == 0) {
704 		ha->maximum_luns_per_target++;
705 	}
706 
707 	/*
708 	 * Setup ring parameters in initialization control block
709 	 */
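	/*
	 * Ring sizes and 64-bit DMA base addresses are stored in the ICB
	 * one byte at a time, least significant byte first, using the
	 * LSB/MSB/LSW/MSW/LSD/MSD macros to pick apart the host values.
	 */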
710 	cnt = REQUEST_ENTRY_CNT;
711 	icb->request_q_length[0] = LSB(cnt);
712 	icb->request_q_length[1] = MSB(cnt);
713 	cnt = RESPONSE_ENTRY_CNT;
714 	icb->response_q_length[0] = LSB(cnt);
715 	icb->response_q_length[1] = MSB(cnt);
716 
717 	icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
718 	icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
719 	icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
720 	icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
721 	icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
722 	icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
723 	icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
724 	icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
725 
726 	icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
727 	icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
728 	icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
729 	icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
730 	icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
731 	icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
732 	icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
733 	icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
734 
735 	/*
736 	 * Setup IP initialization control block
737 	 */
738 	ip_icb->version = IP_ICB_VERSION;
739 
740 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
741 		ip_icb->ip_firmware_options[0] = (uint8_t)
742 		    (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0);
743 	} else {
744 		ip_icb->ip_firmware_options[0] = (uint8_t)
745 		    (ip_icb->ip_firmware_options[0] | BIT_2);
746 	}
747 
748 	cnt = RCVBUF_CONTAINER_CNT;
749 	ip_icb->queue_size[0] = LSB(cnt);
750 	ip_icb->queue_size[1] = MSB(cnt);
751 
752 	ip_icb->queue_address[0] = LSB(LSW(LSD(ha->rcvbuf_dvma)));
753 	ip_icb->queue_address[1] = MSB(LSW(LSD(ha->rcvbuf_dvma)));
754 	ip_icb->queue_address[2] = LSB(MSW(LSD(ha->rcvbuf_dvma)));
755 	ip_icb->queue_address[3] = MSB(MSW(LSD(ha->rcvbuf_dvma)));
756 	ip_icb->queue_address[4] = LSB(LSW(MSD(ha->rcvbuf_dvma)));
757 	ip_icb->queue_address[5] = MSB(LSW(MSD(ha->rcvbuf_dvma)));
758 	ip_icb->queue_address[6] = LSB(MSW(MSD(ha->rcvbuf_dvma)));
759 	ip_icb->queue_address[7] = MSB(MSW(MSD(ha->rcvbuf_dvma)));
760 
761 	if (rval != QL_SUCCESS) {
762 		EL(ha, "failed, rval = %xh\n", rval);
763 	} else {
764 		/*EMPTY*/
765 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
766 	}
767 	return (rval);
768 }
769 
770 /*
771  * Get NVRAM data word
772  *	Calculates word position in NVRAM and calls request routine to
773  *	get the word from NVRAM.
774  *
775  * Input:
776  *	ha = adapter state pointer.
777  *	address = NVRAM word address.
778  *
779  * Returns:
780  *	data word.
781  *
782  * Context:
783  *	Kernel context.
784  */
785 uint16_t
786 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address)
787 {
788 	uint32_t	nv_cmd;
789 	uint16_t	rval;
790 
791 	QL_PRINT_4(CE_CONT, "(%d): started\n", ha->instance);
792 
793 	nv_cmd = address << 16;
794 	nv_cmd = nv_cmd | NV_READ_OP;
795 
796 	rval = (uint16_t)ql_nvram_request(ha, nv_cmd);
797 
798 	QL_PRINT_4(CE_CONT, "(%d): NVRAM data = %xh\n", ha->instance, rval);
799 
800 	return (rval);
801 }
802 
803 /*
804  * NVRAM request
805  *	Sends read command to NVRAM and gets data from NVRAM.
806  *
807  * Input:
808  *	ha = adapter state pointer.
809  *	nv_cmd:	Bit 26 = start bit
810  *		Bits 25-24 = opcode
811  *		Bits 23-16 = address
812  *		Bits 15-0 = write data
813  *
814  * Returns:
815  *	data word.
816  *
817  * Context:
818  *	Kernel context.
819  */
820 static uint16_t
821 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd)
822 {
823 	uint8_t		cnt;
824 	uint16_t	reg_data;
825 	uint16_t	data = 0;
826 
827 	/* Send command to NVRAM. */
828 
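	/*
	 * Clock the 11 command bits (start bit, opcode, address) out
	 * serially, most significant bit first; the shift by 5 below
	 * left-justifies bit 26 of nv_cmd at bit 31.
	 */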
829 	nv_cmd <<= 5;
830 	for (cnt = 0; cnt < 11; cnt++) {
831 		if (nv_cmd & BIT_31) {
832 			ql_nv_write(ha, NV_DATA_OUT);
833 		} else {
834 			ql_nv_write(ha, 0);
835 		}
836 		nv_cmd <<= 1;
837 	}
838 
839 	/* Read data from NVRAM. */
840 
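	/*
	 * Clock 16 data bits back in, most significant bit first: raise
	 * the clock, shift the accumulator, sample NV_DATA_IN into the
	 * low bit, then drop the clock again.
	 */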
841 	for (cnt = 0; cnt < 16; cnt++) {
842 		WRT16_IO_REG(ha, nvram, NV_SELECT+NV_CLOCK);
843 		ql_nv_delay();
844 		data <<= 1;
845 		reg_data = RD16_IO_REG(ha, nvram);
846 		if (reg_data & NV_DATA_IN) {
847 			data = (uint16_t)(data | BIT_0);
848 		}
849 		WRT16_IO_REG(ha, nvram, NV_SELECT);
850 		ql_nv_delay();
851 	}
852 
853 	/* Deselect chip. */
854 
855 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
856 	ql_nv_delay();
857 
858 	return (data);
859 }
860 
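/*
 * ql_nv_write
 *	Clocks one bit out on the NVRAM serial interface by pulsing
 *	NV_CLOCK while the data and chip-select bits are held steady.
 *
 * Input:
 *	ha = adapter state pointer.
 *	data = data bit to write (typically NV_DATA_OUT or 0).
 *
 * Context:
 *	Kernel context.
 */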
861 void
862 ql_nv_write(ql_adapter_state_t *ha, uint16_t data)
863 {
864 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
865 	ql_nv_delay();
866 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK));
867 	ql_nv_delay();
868 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
869 	ql_nv_delay();
870 }
871 
872 void
873 ql_nv_delay(void)
874 {
875 	drv_usecwait(NV_DELAY_COUNT);
876 }
877 
878 /*
879  * ql_nvram_24xx_config
880  *	ISP24xx NVRAM configuration.
881  *
882  * Input:
883  *	ha:		adapter state pointer.
884  *	ha->hba_buf = request and response rings
885  *
886  * Output:
887  *	ha->init_ctrl_blk = initialization control block
888  *	host adapters parameters in host adapter block
889  *
890  * Returns:
891  *	ql local function return status code.
892  *
893  * Context:
894  *	Kernel context.
895  */
896 int
897 ql_nvram_24xx_config(ql_adapter_state_t *ha)
898 {
899 	uint32_t		index, addr, chksum, saved_chksum;
900 	uint32_t		*longptr;
901 	nvram_24xx_t		nvram;
902 	int			idpromlen;
903 	char			idprombuf[32];
904 	caddr_t			src, dst;
905 	uint16_t		w1;
906 	int			rval;
907 	nvram_24xx_t		*nv = (nvram_24xx_t *)&nvram;
908 	ql_init_24xx_cb_t	*icb =
909 	    (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
910 	ql_ip_init_24xx_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb24;
911 
912 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
913 
914 	if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) {
915 
916 		/* Get NVRAM data and calculate checksum. */
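		/*
		 * The 32-bit sum of all words must be zero for the image to
		 * be valid.  saved_chksum tracks the sum excluding the last
		 * word read so that, on failure, the value the checksum
		 * word should have held can be logged below.
		 */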
917 		longptr = (uint32_t *)nv;
918 		chksum = saved_chksum = 0;
919 		for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) {
920 			rval = ql_24xx_read_flash(ha, addr++, longptr);
921 			if (rval != QL_SUCCESS) {
922 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
923 				break;
924 			}
925 			saved_chksum = chksum;
926 			chksum += *longptr;
927 			LITTLE_ENDIAN_32(longptr);
928 			longptr++;
929 		}
930 
931 		ql_release_nvram(ha);
932 	}
933 
934 	/* Bad NVRAM data, set default parameters. */
935 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
936 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
937 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
938 
939 		cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using "
940 		    "driver defaults.", QL_NAME, ha->instance);
941 
942 		EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, "
943 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
944 		    nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0],
945 		    nv->nvram_version[1]));
946 
947 		saved_chksum = ~saved_chksum + 1;
948 
949 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0,
950 		    MSW(saved_chksum), LSW(saved_chksum));
951 
952 		/* Reset NVRAM data. */
953 		bzero((void *)nv, sizeof (nvram_24xx_t));
954 
955 		/*
956 		 * Set default initialization control block.
957 		 */
958 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
959 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
960 
961 		nv->version[0] = 1;
962 		nv->max_frame_length[1] = 8;
963 		nv->execution_throttle[0] = 16;
964 		nv->max_luns_per_target[0] = 8;
965 
966 		idpromlen = 32;
967 
968 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
969 		if ((rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
970 		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
971 		    &idpromlen)) != DDI_PROP_SUCCESS) {
972 
973 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
974 			    "property, rval=%x", QL_NAME, ha->instance, rval);
975 
976 			nv->port_name[0] = 33;
977 			nv->port_name[3] = 224;
978 			nv->port_name[4] = 139;
979 			nv->port_name[7] = (uint8_t)
980 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
981 		} else {
982 			nv->port_name[2] = idprombuf[2];
983 			nv->port_name[3] = idprombuf[3];
984 			nv->port_name[4] = idprombuf[4];
985 			nv->port_name[5] = idprombuf[5];
986 			nv->port_name[6] = idprombuf[6];
987 			nv->port_name[7] = idprombuf[7];
988 			nv->port_name[0] = (uint8_t)
989 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
990 		}
991 
992 		cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default "
993 		    "HBA parameters and temporary "
994 		    "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
995 		    ha->instance, nv->port_name[0], nv->port_name[1],
996 		    nv->port_name[2], nv->port_name[3], nv->port_name[4],
997 		    nv->port_name[5], nv->port_name[6], nv->port_name[7]);
998 
999 		nv->login_retry_count[0] = 8;
1000 
1001 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
1002 		nv->firmware_options_1[1] = BIT_5;
1003 		nv->firmware_options_2[0] = BIT_5;
1004 		nv->firmware_options_2[1] = BIT_4;
1005 		nv->firmware_options_3[1] = BIT_6;
1006 
1007 		/*
1008 		 * Set default host adapter parameters
1009 		 */
1010 		nv->host_p[0] = BIT_4 | BIT_1;
1011 		nv->host_p[1] = BIT_3 | BIT_2;
1012 		nv->reset_delay = 5;
1013 		nv->max_luns_per_target[0] = 128;
1014 		nv->port_down_retry_count[0] = 30;
1015 		nv->link_down_timeout[0] = 30;
1016 
1017 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
1018 			nv->firmware_options_3[2] = BIT_4;
1019 			nv->feature_mask_l[0] = 9;
1020 			nv->ext_blk.version[0] = 1;
1021 			nv->ext_blk.fcf_vlan_match = 1;
1022 			nv->ext_blk.fcf_vlan_id[0] = LSB(1002);
1023 			nv->ext_blk.fcf_vlan_id[1] = MSB(1002);
1024 		}
1025 
1026 		rval = QL_FUNCTION_FAILED;
1027 	}
1028 
1029 	/* Check for adapter node name (big endian). */
1030 	for (index = 0; index < 8; index++) {
1031 		if (nv->node_name[index] != 0) {
1032 			break;
1033 		}
1034 	}
1035 
1036 	/* Copy port name if no node name (big endian). */
1037 	if (index == 8) {
1038 		bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
1039 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
1040 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
1041 	}
1042 
1043 	/* Reset initialization control blocks. */
1044 	bzero((void *)icb, sizeof (ql_init_24xx_cb_t));
1045 
1046 	/* Get driver properties. */
1047 	ql_24xx_properties(ha, nv);
1048 
1049 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
1050 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1051 	    QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
1052 	    nv->port_name[2], nv->port_name[3], nv->port_name[4],
1053 	    nv->port_name[5], nv->port_name[6], nv->port_name[7],
1054 	    nv->node_name[0], nv->node_name[1], nv->node_name[2],
1055 	    nv->node_name[3], nv->node_name[4], nv->node_name[5],
1056 	    nv->node_name[6], nv->node_name[7]);
1057 
1058 	/*
1059 	 * Copy over NVRAM Firmware Initialization Control Block.
1060 	 */
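	/*
	 * As in the 2xxx path, the copy skips the ICB queue pointer,
	 * length and address fields, which have no NVRAM counterpart;
	 * selected fields beyond them are copied individually and the
	 * queues are programmed from the DMA handles further below.
	 */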
1061 	dst = (caddr_t)icb;
1062 	src = (caddr_t)&nv->version;
1063 	index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] -
1064 	    (uintptr_t)icb);
1065 	while (index--) {
1066 		*dst++ = *src++;
1067 	}
1068 	icb->login_retry_count[0] = nv->login_retry_count[0];
1069 	icb->login_retry_count[1] = nv->login_retry_count[1];
1070 	icb->link_down_on_nos[0] = nv->link_down_on_nos[0];
1071 	icb->link_down_on_nos[1] = nv->link_down_on_nos[1];
1072 
1073 	dst = (caddr_t)&icb->interrupt_delay_timer;
1074 	src = (caddr_t)&nv->interrupt_delay_timer;
1075 	index = (uint32_t)((uintptr_t)&icb->qos -
1076 	    (uintptr_t)&icb->interrupt_delay_timer);
1077 	while (index--) {
1078 		*dst++ = *src++;
1079 	}
1080 
1081 	/*
1082 	 * Setup driver firmware options.
1083 	 */
1084 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
1085 		dst = (caddr_t)icb->enode_mac_addr;
1086 		src = (caddr_t)nv->fw.isp8001.e_node_mac_addr;
1087 		index = sizeof (nv->fw.isp8001.e_node_mac_addr);
1088 		while (index--) {
1089 			*dst++ = *src++;
1090 		}
1091 		dst = (caddr_t)&icb->ext_blk;
1092 		src = (caddr_t)&nv->ext_blk;
1093 		index = sizeof (ql_ext_icb_8100_t);
1094 		while (index--) {
1095 			*dst++ = *src++;
1096 		}
1097 		EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n",
1098 		    icb->enode_mac_addr[0], icb->enode_mac_addr[1],
1099 		    icb->enode_mac_addr[2], icb->enode_mac_addr[3],
1100 		    icb->enode_mac_addr[4], icb->enode_mac_addr[5]);
1101 	} else {
1102 		icb->firmware_options_1[0] = (uint8_t)
1103 		    (icb->firmware_options_1[0] | BIT_1);
1104 		icb->firmware_options_1[1] = (uint8_t)
1105 		    (icb->firmware_options_1[1] | BIT_5 | BIT_2);
1106 		icb->firmware_options_3[0] = (uint8_t)
1107 		    (icb->firmware_options_3[0] | BIT_1);
1108 	}
1109 	icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] &
1110 	    ~(BIT_5 | BIT_4));
1111 	icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] |
1112 	    BIT_6);
1113 	icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] &
1114 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
1115 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
1116 		icb->firmware_options_2[1] = (uint8_t)
1117 		    (icb->firmware_options_2[1] | BIT_4);
1118 	} else {
1119 		icb->firmware_options_2[1] = (uint8_t)
1120 		    (icb->firmware_options_2[1] & ~BIT_4);
1121 	}
1122 
1123 	icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] &
1124 	    ~BIT_7);
1125 
1126 	/* Enable special N_Port-to-N_Port login behaviour. */
1127 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1128 		icb->firmware_options_3[1] =
1129 		    (uint8_t)(icb->firmware_options_3[1] | BIT_0);
1130 	}
1131 
1132 	icb->execution_throttle[0] = 0xff;
1133 	icb->execution_throttle[1] = 0xff;
1134 
1135 	/*
1136 	 * Set host adapter parameters
1137 	 */
1138 	ADAPTER_STATE_LOCK(ha);
1139 	ha->nvram_version = CHAR_TO_SHORT(nv->nvram_version[0],
1140 	    nv->nvram_version[1]);
1141 	nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
1142 	    (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
1143 	nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
1144 	    (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
1145 	ha->cfg_flags &= ~(CFG_DISABLE_RISC_CODE_LOAD |
1146 	    CFG_SET_CACHE_LINE_SIZE_1 | CFG_MULTI_CHIP_ADAPTER);
1147 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
1148 	ADAPTER_STATE_UNLOCK(ha);
1149 
1150 	ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
1151 	    nv->execution_throttle[1]);
1152 	ha->loop_reset_delay = nv->reset_delay;
1153 	ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0],
1154 	    nv->port_down_retry_count[1]);
1155 	w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]);
1156 	ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? R_A_TOV_DEFAULT : w1);
1157 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
1158 	    nv->max_luns_per_target[0], nv->max_luns_per_target[1]);
1159 	if (ha->maximum_luns_per_target == 0) {
1160 		ha->maximum_luns_per_target++;
1161 	}
1162 
1163 	/* ISP2422 Serial Link Control */
1164 	if (CFG_IST(ha, CFG_CTRL_2422)) {
1165 		ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0],
1166 		    nv->fw.isp2400.swing_opt[1]);
1167 		ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0],
1168 		    nv->fw.isp2400.swing_1g[1]);
1169 		ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0],
1170 		    nv->fw.isp2400.swing_2g[1]);
1171 		ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0],
1172 		    nv->fw.isp2400.swing_4g[1]);
1173 	}
1174 
1175 	/*
1176 	 * Setup ring parameters in initialization control block
1177 	 */
1178 	w1 = REQUEST_ENTRY_CNT;
1179 	icb->request_q_length[0] = LSB(w1);
1180 	icb->request_q_length[1] = MSB(w1);
1181 	w1 = RESPONSE_ENTRY_CNT;
1182 	icb->response_q_length[0] = LSB(w1);
1183 	icb->response_q_length[1] = MSB(w1);
1184 
1185 	icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
1186 	icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
1187 	icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
1188 	icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
1189 	icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
1190 	icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
1191 	icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
1192 	icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
1193 
1194 	icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
1195 	icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
1196 	icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
1197 	icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
1198 	icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
1199 	icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
1200 	icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
1201 	icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
1202 
1203 	/*
1204 	 * Setup IP initialization control block
1205 	 */
1206 	ip_icb->version = IP_ICB_24XX_VERSION;
1207 
1208 	ip_icb->ip_firmware_options[0] = (uint8_t)
1209 	    (ip_icb->ip_firmware_options[0] | BIT_2);
1210 
1211 	if (rval != QL_SUCCESS) {
1212 		EL(ha, "failed, rval = %xh\n", rval);
1213 	} else {
1214 		/*EMPTY*/
1215 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1216 	}
1217 	return (rval);
1218 }
1219 
1220 /*
1221  * ql_lock_nvram
1222  *	Locks NVRAM access and returns starting address of NVRAM.
1223  *
1224  * Input:
1225  *	ha:	adapter state pointer.
1226  *	addr:	pointer for start address.
1227  *	flags:	mutually exclusive options:
1228  *		LNF_NVRAM_DATA --> get nvram
1229  *		LNF_VPD_DATA --> get vpd data (24/25xx only).
1230  *
1231  * Returns:
1232  *	ql local function return status code.
1233  *
1234  * Context:
1235  *	Kernel context.
1236  */
1237 int
1238 ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
1239 {
1240 	int	i;
1241 
1242 	if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
1243 		EL(ha, "invalid options for function");
1244 		return (QL_FUNCTION_FAILED);
1245 	}
1246 
1247 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1248 		if ((flags & LNF_NVRAM_DATA) == 0) {
1249 			EL(ha, "invalid 2312/2322 option for HBA");
1250 			return (QL_FUNCTION_FAILED);
1251 		}
1252 
1253 		/* if function number is non-zero, then adjust offset */
1254 		*addr = ha->flash_nvram_addr;
1255 
1256 		/* Try to get resource lock. Wait for 10 seconds max */
1257 		for (i = 0; i < 10000; i++) {
1258 			/* if nvram busy bit is reset, acquire sema */
1259 			if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
1260 				WRT16_IO_REG(ha, host_to_host_sema, 1);
1261 				drv_usecwait(MILLISEC);
1262 				if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
1263 					break;
1264 				}
1265 			}
1266 			drv_usecwait(MILLISEC);
1267 		}
1268 		if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
1269 			cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
1270 			    QL_NAME, ha->instance);
1271 			return (QL_FUNCTION_FAILED);
1272 		}
1273 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1274 		if (flags & LNF_VPD_DATA) {
1275 			*addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
1276 		} else if (flags & LNF_NVRAM_DATA) {
1277 			*addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
1278 		} else {
1279 			EL(ha, "invalid 2422 option for HBA");
1280 			return (QL_FUNCTION_FAILED);
1281 		}
1282 
1283 		GLOBAL_HW_LOCK();
1284 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1285 		if (flags & LNF_VPD_DATA) {
1286 			*addr = ha->flash_data_addr | ha->flash_vpd_addr;
1287 		} else if (flags & LNF_NVRAM_DATA) {
1288 			*addr = ha->flash_data_addr | ha->flash_nvram_addr;
1289 		} else {
1290 			EL(ha, "invalid 2581 option for HBA");
1291 			return (QL_FUNCTION_FAILED);
1292 		}
1293 
1294 		GLOBAL_HW_LOCK();
1295 	} else {
1296 		if ((flags & LNF_NVRAM_DATA) == 0) {
1297 			EL(ha, "invalid option for HBA");
1298 			return (QL_FUNCTION_FAILED);
1299 		}
1300 		*addr = 0;
1301 		GLOBAL_HW_LOCK();
1302 	}
1303 
1304 	return (QL_SUCCESS);
1305 }
1306 
1307 /*
1308  * ql_release_nvram
1309  *	Releases NVRAM access.
1310  *
1311  * Input:
1312  *	ha:	adapter state pointer.
1313  *
1314  * Context:
1315  *	Kernel context.
1316  */
1317 void
1318 ql_release_nvram(ql_adapter_state_t *ha)
1319 {
1320 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1321 		/* Release resource lock */
1322 		WRT16_IO_REG(ha, host_to_host_sema, 0);
1323 	} else {
1324 		GLOBAL_HW_UNLOCK();
1325 	}
1326 }
1327 
1328 /*
1329  * ql_23_properties
1330  *	Copies driver properties to NVRAM or adapter structure.
1331  *
1332  *	Driver properties are by design global variables and hidden
1333  *	completely from administrators. Knowledgeable folks can
1334  *	override the default values using driver.conf
1335  *
1336  * Input:
1337  *	ha:	adapter state pointer.
1338  *	nv:	NVRAM structure pointer.
1339  *
1340  * Context:
1341  *	Kernel context.
1342  */
1343 static void
1344 ql_23_properties(ql_adapter_state_t *ha, nvram_t *nv)
1345 {
1346 	uint32_t	data, cnt;
1347 
1348 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1349 
1350 	/* Get frame payload size. */
1351 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1352 		data = 2048;
1353 	}
1354 	if (data == 512 || data == 1024 || data == 2048) {
1355 		nv->max_frame_length[0] = LSB(data);
1356 		nv->max_frame_length[1] = MSB(data);
1357 	} else {
1358 		EL(ha, "invalid parameter value for 'max-frame-length': "
1359 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1360 		    nv->max_frame_length[0], nv->max_frame_length[1]));
1361 	}
1362 
1363 	/* Get max IOCB allocation. */
1364 	nv->max_iocb_allocation[0] = 0;
1365 	nv->max_iocb_allocation[1] = 1;
1366 
1367 	/* Get execution throttle. */
1368 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1369 		data = 32;
1370 	}
1371 	if (data != 0 && data < 65536) {
1372 		nv->execution_throttle[0] = LSB(data);
1373 		nv->execution_throttle[1] = MSB(data);
1374 	} else {
1375 		EL(ha, "invalid parameter value for 'execution-throttle': "
1376 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1377 		    nv->execution_throttle[0], nv->execution_throttle[1]));
1378 	}
1379 
1380 	/* Get Login timeout. */
1381 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1382 		data = 3;
1383 	}
1384 	if (data < 256) {
1385 		nv->login_timeout = (uint8_t)data;
1386 	} else {
1387 		EL(ha, "invalid parameter value for 'login-timeout': "
1388 		    "%d; using nvram value of %d\n", data, nv->login_timeout);
1389 	}
1390 
1391 	/* Get retry count. */
1392 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1393 		data = 4;
1394 	}
1395 	if (data < 256) {
1396 		nv->login_retry_count = (uint8_t)data;
1397 	} else {
1398 		EL(ha, "invalid parameter value for 'login-retry-count': "
1399 		    "%d; using nvram value of %d\n", data,
1400 		    nv->login_retry_count);
1401 	}
1402 
1403 	/* Get adapter hard loop ID enable. */
1404 	data =  ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1405 	if (data == 0) {
1406 		nv->firmware_options[0] =
1407 		    (uint8_t)(nv->firmware_options[0] & ~BIT_0);
1408 	} else if (data == 1) {
1409 		nv->firmware_options[0] =
1410 		    (uint8_t)(nv->firmware_options[0] | BIT_0);
1411 	} else if (data != 0xffffffff) {
1412 		EL(ha, "invalid parameter value for "
1413 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1414 		    "of %d\n", data, nv->firmware_options[0] & BIT_0 ? 1 : 0);
1415 	}
1416 
1417 	/* Get adapter hard loop ID. */
1418 	data =  ql_get_prop(ha, "adapter-hard-loop-ID");
1419 	if (data < 126) {
1420 		nv->hard_address[0] = (uint8_t)data;
1421 	} else if (data != 0xffffffff) {
1422 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
1423 		    "%d; using nvram value of %d\n",
1424 		    data, nv->hard_address[0]);
1425 	}
1426 
1427 	/* Get LIP reset. */
1428 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1429 	    0xffffffff) {
1430 		data = 0;
1431 	}
1432 	if (data == 0) {
1433 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_1);
1434 	} else if (data == 1) {
1435 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_1);
1436 	} else {
1437 		EL(ha, "invalid parameter value for "
1438 		    "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
1439 		    "of %d\n", data, nv->host_p[1] & BIT_1 ? 1 : 0);
1440 	}
1441 
1442 	/* Get LIP full login. */
1443 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1444 	    0xffffffff) {
1445 		data = 1;
1446 	}
1447 	if (data == 0) {
1448 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1449 	} else if (data == 1) {
1450 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1451 	} else {
1452 		EL(ha, "invalid parameter value for "
1453 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1454 		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1455 	}
1456 
1457 	/* Get target reset. */
1458 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1459 	    0xffffffff) {
1460 		data = 0;
1461 	}
1462 	if (data == 0) {
1463 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1464 	} else if (data == 1) {
1465 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1466 	} else {
1467 		EL(ha, "invalid parameter value for "
1468 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1469 		    "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1470 	}
1471 
1472 	/* Get reset delay. */
1473 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1474 		data = 5;
1475 	}
1476 	if (data != 0 && data < 256) {
1477 		nv->reset_delay = (uint8_t)data;
1478 	} else {
1479 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1480 		    "using nvram value of %d", data, nv->reset_delay);
1481 	}
1482 
1483 	/* Get port down retry count. */
1484 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1485 		data = 8;
1486 	}
1487 	if (data < 256) {
1488 		nv->port_down_retry_count = (uint8_t)data;
1489 	} else {
1490 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
1491 		    " %d; using nvram value of %d\n", data,
1492 		    nv->port_down_retry_count);
1493 	}
1494 
1495 	/* Get connection mode setting. */
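	/*
	 * The connection option is stored in bits 6-4 of additional
	 * firmware option byte 0; values 0-2 (0-3 on the 2200) select
	 * the loop versus point-to-point connection behaviour.
	 */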
1496 	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1497 		data = 2;
1498 	}
1499 	cnt = CFG_IST(ha, CFG_CTRL_2200) ? 3 : 2;
1500 	if (data <= cnt) {
1501 		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] &
1502 		    ~(BIT_6 | BIT_5 | BIT_4));
1503 		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
1504 		    (uint8_t)(data << 4));
1505 	} else {
1506 		EL(ha, "invalid parameter value for 'connection-options': "
1507 		    "%d; using nvram value of %d\n", data,
1508 		    (nv->add_fw_opt[0] >> 4) & 0x3);
1509 	}
1510 
1511 	/* Get data rate setting. */
1512 	if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
1513 		if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1514 			data = 2;
1515 		}
1516 		if (data < 3) {
1517 			nv->special_options[1] = (uint8_t)
1518 			    (nv->special_options[1] & 0x3f);
1519 			nv->special_options[1] = (uint8_t)
1520 			    (nv->special_options[1] | (uint8_t)(data << 6));
1521 		} else {
1522 			EL(ha, "invalid parameter value for 'fc-data-rate': "
1523 			    "%d; using nvram value of %d\n", data,
1524 			    (nv->special_options[1] >> 6) & 0x3);
1525 		}
1526 	}
1527 
1528 	/* Get adapter id string for Sun branded 23xx only */
1529 	if ((CFG_IST(ha, CFG_CTRL_2300)) && nv->adapInfo[0] != 0) {
1530 		(void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
1531 		    nv->adapInfo);
1532 	}
1533 
1534 	/* Get IP FW container count. */
1535 	ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
1536 	ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);
1537 
1538 	/* Get IP low water mark. */
1539 	ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
1540 	ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);
1541 
1542 	/* Get IP fast register post count. */
1543 	ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
1544 	    ql_ip_fast_post_count;
1545 
1546 	ADAPTER_STATE_LOCK(ha);
1547 
1548 	ql_common_properties(ha);
1549 
1550 	ADAPTER_STATE_UNLOCK(ha);
1551 
1552 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1553 }
1554 
1555 /*
1556  * ql_common_properties
1557  *	Copies driver properties to the adapter structure.
1558  *
1559  *	Driver properties are by design global variables and hidden
1560  *	completely from administrators. Knowledgeable folks can
1561  *	override the default values using driver.conf
1562  *
1563  * Input:
1564  *	ha:	adapter state pointer.
1565  *
1566  * Context:
1567  *	Kernel context.
1568  */
1569 void
1570 ql_common_properties(ql_adapter_state_t *ha)
1571 {
1572 	uint32_t	data;
1573 
1574 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1575 
1576 	/* Get extended logging trace buffer size. */
1577 	if ((data = ql_get_prop(ha, "set-ext-log-buffer-size")) !=
1578 	    0xffffffff && data != 0) {
1579 		char		*new_trace;
1580 		uint32_t	new_size;
1581 
1582 		if (ha->el_trace_desc->trace_buffer != NULL) {
1583 			new_size = 1024 * data;
1584 			new_trace = (char *)kmem_zalloc(new_size, KM_SLEEP);
1585 
1586 			if (new_trace == NULL) {
1587 				cmn_err(CE_WARN, "%s(%d): can't get new"
1588 				    " trace buffer",
1589 				    QL_NAME, ha->instance);
1590 			} else {
1591 				/* free the previous */
1592 				kmem_free(ha->el_trace_desc->trace_buffer,
1593 				    ha->el_trace_desc->trace_buffer_size);
1594 				/* Use the new one */
1595 				ha->el_trace_desc->trace_buffer = new_trace;
1596 				ha->el_trace_desc->trace_buffer_size = new_size;
1597 			}
1598 		}
1599 
1600 	}
1601 
1602 	/* Get extended logging enable. */
1603 	if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
1604 	    data == 0) {
1605 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1606 	} else if (data == 1) {
1607 		ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1608 	} else {
1609 		EL(ha, "invalid parameter value for 'extended-logging': %d;"
1610 		    " using default value of 0\n", data);
1611 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1612 	}
1613 
1614 	/* Get extended logging trace disable. */
1615 	if ((data = ql_get_prop(ha, "disable-extended-logging-trace")) ==
1616 	    0xffffffff || data == 0) {
1617 		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1618 	} else if (data == 1) {
1619 		ha->cfg_flags |= CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1620 	} else {
1621 		EL(ha, "invalid parameter value for "
1622 		    "'disable-extended-logging-trace': %d;"
1623 		    " using default value of 0\n", data);
1624 		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1625 	}
1626 
1627 	/* Get FCP 2 Error Recovery. */
1628 	if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
1629 	    0xffffffff || data == 1) {
1630 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1631 	} else if (data == 0) {
1632 		ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
1633 	} else {
1634 		EL(ha, "invalid parameter value for "
1635 		    "'enable-FCP-2-error-recovery': %d; using nvram value of "
1636 		    "1\n", data);
1637 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1638 	}
1639 
1640 #ifdef QL_DEBUG_LEVEL_2
1641 	ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1642 #endif
1643 
1644 	/* Get port down retry delay. */
1645 	if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
1646 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1647 	} else if (data < 256) {
1648 		ha->port_down_retry_delay = (uint8_t)data;
1649 	} else {
1650 		EL(ha, "invalid parameter value for 'port-down-retry-delay':"
1651 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1652 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1653 	}
1654 
1655 	/* Get queue full retry count. */
1656 	if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
1657 		ha->qfull_retry_count = 16;
1658 	} else if (data < 256) {
1659 		ha->qfull_retry_count = (uint8_t)data;
1660 	} else {
1661 		EL(ha, "invalid parameter value for 'queue-full-retry-count':"
1662 		    " %d; using default value of 16", data);
1663 		ha->qfull_retry_count = 16;
1664 	}
1665 
1666 	/* Get queue full retry delay. */
1667 	if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
1668 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1669 	} else if (data < 256) {
1670 		ha->qfull_retry_delay = (uint8_t)data;
1671 	} else {
1672 		EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
1673 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1674 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1675 	}
1676 
1677 	/* Get loop down timeout. */
1678 	if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
1679 		data = 0;
1680 	} else if (data > 255) {
1681 		EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
1682 		    " using nvram value of 0\n", data);
1683 		data = 0;
1684 	}
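	/*
	 * Convert the link-down-timeout value into a countdown start
	 * value for the loop-down abort timer, clamped so it stays
	 * strictly between the timer's end and start markers.
	 */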
1685 	ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
1686 	if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
1687 		ha->loop_down_abort_time--;
1688 	} else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
1689 		ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
1690 	}
1691 
1692 	/* Get link down error enable. */
1693 	if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
1694 	    data == 1) {
1695 		ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1696 	} else if (data == 0) {
1697 		ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
1698 	} else {
1699 		EL(ha, "invalid parameter value for 'link-down-error': %d;"
1700 		    " using default value of 1\n", data);
1701 	}
1702 
1703 	/*
1704 	 * Get firmware dump flags.
1705 	 *	TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT		BIT_0
1706 	 *	TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR	BIT_1
1707 	 *	TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT	BIT_2
1708 	 *	TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT	BIT_3
1709 	 */
1710 	ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
1711 	    CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
1712 	    CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
1713 	if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
1714 		if (data & BIT_0) {
1715 			ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
1716 		}
1717 		if (data & BIT_1) {
1718 			ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
1719 		}
1720 		if (data & BIT_2) {
1721 			ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
1722 		}
1723 		if (data & BIT_3) {
1724 			ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
1725 		}
1726 	}
1727 
1728 	/* Get the PCI max read request size override. */
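	/*
	 * A nonzero override is presumably programmed into the adapter's
	 * PCI/PCIe maximum read request size later during setup; values
	 * are typically powers of two between 128 and 4096 bytes.
	 */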
1729 	ha->pci_max_read_req = 0;
1730 	if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
1731 	    data != 0) {
1732 		ha->pci_max_read_req = (uint16_t)(data);
1733 	}
1734 
1735 	/*
	 * Set the default firmware-ready wait, adjusted upward for slow
	 * FCFs.  Revisit when FCFs are as fast as FC switches.
1738 	 */
1739 	ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_CTRL_81XX) ? 45 : 10);
1740 	/* Get the attach fw_ready override value. */
1741 	if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
1742 		if (data > 0 && data <= 240) {
1743 			ha->fwwait = (uint8_t)data;
1744 		} else {
1745 			EL(ha, "invalid parameter value for "
1746 			    "'init-loop-sync-wait': %d; using default "
1747 			    "value of %d\n", data, ha->fwwait);
1748 		}
1749 	}
1750 
1751 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1752 }
1753 
1754 /*
1755  * ql_24xx_properties
1756  *	Copies driver properties to NVRAM or adapter structure.
1757  *
1758  *	Driver properties are by design global variables and hidden
1759  *	completely from administrators. Knowledgeable folks can
1760  *	override the default values using /etc/system.
1761  *
1762  * Input:
1763  *	ha:	adapter state pointer.
1764  *	nv:	NVRAM structure pointer.
1765  *
1766  * Context:
1767  *	Kernel context.
1768  */
1769 static void
1770 ql_24xx_properties(ql_adapter_state_t *ha, nvram_24xx_t *nv)
1771 {
1772 	uint32_t	data;
1773 
1774 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1775 
1776 	/* Get frame size */
1777 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1778 		data = 2048;
1779 	}
1780 	if (data == 512 || data == 1024 || data == 2048) {
1781 		nv->max_frame_length[0] = LSB(data);
1782 		nv->max_frame_length[1] = MSB(data);
1783 	} else {
1784 		EL(ha, "invalid parameter value for 'max-frame-length': %d;"
1785 		    " using nvram default of %d\n", data, CHAR_TO_SHORT(
1786 		    nv->max_frame_length[0], nv->max_frame_length[1]));
1787 	}
1788 
1789 	/* Get execution throttle. */
1790 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1791 		data = 32;
1792 	}
1793 	if (data != 0 && data < 65536) {
1794 		nv->execution_throttle[0] = LSB(data);
1795 		nv->execution_throttle[1] = MSB(data);
1796 	} else {
1797 		EL(ha, "invalid parameter value for 'execution-throttle':"
1798 		    " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1799 		    nv->execution_throttle[0], nv->execution_throttle[1]));
1800 	}
1801 
1802 	/* Get Login timeout. */
1803 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1804 		data = 3;
1805 	}
1806 	if (data < 65536) {
1807 		nv->login_timeout[0] = LSB(data);
1808 		nv->login_timeout[1] = MSB(data);
1809 	} else {
1810 		EL(ha, "invalid parameter value for 'login-timeout': %d; "
1811 		    "using nvram value of %d\n", data, CHAR_TO_SHORT(
1812 		    nv->login_timeout[0], nv->login_timeout[1]));
1813 	}
1814 
1815 	/* Get retry count. */
1816 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1817 		data = 4;
1818 	}
1819 	if (data < 65536) {
1820 		nv->login_retry_count[0] = LSB(data);
1821 		nv->login_retry_count[1] = MSB(data);
1822 	} else {
1823 		EL(ha, "invalid parameter value for 'login-retry-count': "
1824 		    "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1825 		    nv->login_retry_count[0], nv->login_retry_count[1]));
1826 	}
1827 
1828 	/* Get adapter hard loop ID enable. */
1829 	data =  ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1830 	if (data == 0) {
1831 		nv->firmware_options_1[0] =
1832 		    (uint8_t)(nv->firmware_options_1[0] & ~BIT_0);
1833 	} else if (data == 1) {
1834 		nv->firmware_options_1[0] =
1835 		    (uint8_t)(nv->firmware_options_1[0] | BIT_0);
1836 	} else if (data != 0xffffffff) {
1837 		EL(ha, "invalid parameter value for "
1838 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1839 		    "of %d\n", data,
1840 		    nv->firmware_options_1[0] & BIT_0 ? 1 : 0);
1841 	}
1842 
1843 	/* Get adapter hard loop ID. */
1844 	data =  ql_get_prop(ha, "adapter-hard-loop-ID");
1845 	if (data < 126) {
1846 		nv->hard_address[0] = LSB(data);
1847 		nv->hard_address[1] = MSB(data);
1848 	} else if (data != 0xffffffff) {
1849 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
1850 		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1851 		    nv->hard_address[0], nv->hard_address[1]));
1852 	}
1853 
1854 	/* Get LIP reset. */
1855 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1856 	    0xffffffff) {
1857 		data = 0;
1858 	}
1859 	if (data == 0) {
1860 		ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1861 	} else if (data == 1) {
1862 		ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1863 	} else {
1864 		EL(ha, "invalid parameter value for "
1865 		    "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
1866 		    data);
1867 	}
1868 
1869 	/* Get LIP full login. */
1870 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1871 	    0xffffffff) {
1872 		data = 1;
1873 	}
1874 	if (data == 0) {
1875 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1876 	} else if (data == 1) {
1877 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1878 	} else {
1879 		EL(ha, "invalid parameter value for "
1880 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1881 		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1882 	}
1883 
1884 	/* Get target reset. */
1885 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1886 	    0xffffffff) {
1887 		data = 0;
1888 	}
1889 	if (data == 0) {
1890 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1891 	} else if (data == 1) {
1892 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1893 	} else {
1894 		EL(ha, "invalid parameter value for "
1895 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1896 		    "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1897 	}
1898 
1899 	/* Get reset delay. */
1900 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1901 		data = 5;
1902 	}
1903 	if (data != 0 && data < 256) {
1904 		nv->reset_delay = (uint8_t)data;
1905 	} else {
1906 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1907 		    "using nvram value of %d", data, nv->reset_delay);
1908 	}
1909 
1910 	/* Get port down retry count. */
1911 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1912 		data = 8;
1913 	}
1914 	if (data < 256) {
1915 		nv->port_down_retry_count[0] = LSB(data);
1916 		nv->port_down_retry_count[1] = MSB(data);
1917 	} else {
1918 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
1919 		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1920 		    nv->port_down_retry_count[0],
1921 		    nv->port_down_retry_count[1]));
1922 	}
1923 
1924 	/* Get connection mode setting. */
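	/*
	 * Assumed encoding (the usual QLogic convention): 0 = loop only,
	 * 1 = point-to-point only, 2 = loop preferred, otherwise
	 * point-to-point.
	 */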
1925 	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1926 		data = 2;
1927 	}
1928 	if (data <= 2) {
1929 		nv->firmware_options_2[0] = (uint8_t)
1930 		    (nv->firmware_options_2[0] & ~(BIT_6 | BIT_5 | BIT_4));
1931 		nv->firmware_options_2[0] = (uint8_t)
1932 		    (nv->firmware_options_2[0] | (uint8_t)(data << 4));
1933 	} else {
1934 		EL(ha, "invalid parameter value for 'connection-options':"
1935 		    " %d; using nvram value of %d\n", data,
1936 		    (nv->firmware_options_2[0] >> 4) & 0x3);
1937 	}
1938 
1939 	/* Get data rate setting. */
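	/*
	 * Assumed encoding: 0 = 1 Gb/s, 1 = 2 Gb/s, 2 = auto-negotiate,
	 * 3 = 4 Gb/s, 4 = 8 Gb/s (ISP25xx/81xx only).
	 */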
1940 	if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1941 		data = 2;
1942 	}
1943 	if ((CFG_IST(ha, CFG_CTRL_2422) && data < 4) ||
1944 	    (CFG_IST(ha, CFG_CTRL_2581) && data < 5)) {
1945 		nv->firmware_options_3[1] = (uint8_t)
1946 		    (nv->firmware_options_3[1] & 0x1f);
1947 		nv->firmware_options_3[1] = (uint8_t)
1948 		    (nv->firmware_options_3[1] | (uint8_t)(data << 5));
1949 	} else {
1950 		EL(ha, "invalid parameter value for 'fc-data-rate': %d; "
1951 		    "using nvram value of %d\n", data,
1952 		    (nv->firmware_options_3[1] >> 5) & 0x7);
1953 	}
1954 
1955 	/* Get IP FW container count. */
1956 	ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
1957 	ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);
1958 
1959 	/* Get IP low water mark. */
1960 	ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
1961 	ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);
1962 
1963 	ADAPTER_STATE_LOCK(ha);
1964 
1965 	/* Get enable flash load. */
1966 	if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
1967 	    data == 0) {
1968 		ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
1969 	} else if (data == 1) {
1970 		ha->cfg_flags |= CFG_LOAD_FLASH_FW;
1971 	} else {
1972 		EL(ha, "invalid parameter value for 'enable-flash-load': "
1973 		    "%d; using default value of 0\n", data);
1974 	}
1975 
1976 	/* Enable firmware extended tracing */
1977 	if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
1978 		if (data != 0) {
1979 			ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
1980 		}
1981 	}
1982 
1983 	/* Enable firmware fc tracing */
1984 	if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
1985 		ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
1986 		ha->fwfcetraceopt = data;
1987 	}
1988 
1989 	ql_common_properties(ha);
1990 
1991 	ADAPTER_STATE_UNLOCK(ha);
1992 
1993 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1994 }
1995 
1996 /*
1997  * ql_get_prop
1998  *	Get property value from configuration file.
1999  *
2000  * Input:
2001  *	ha= adapter state pointer.
2002  *	string = property string pointer.
2003  *
2004  * Returns:
2005  *	0xFFFFFFFF = no property else property value.
2006  *
2007  * Context:
2008  *	Kernel context.
2009  */
2010 uint32_t
2011 ql_get_prop(ql_adapter_state_t *ha, char *string)
2012 {
2013 	char		buf[256];
2014 	uint32_t	data = 0xffffffff;
2015 
2016 	/*
	 * Look for an adapter instance NPIV (virtual port) specific parameter
2018 	 */
2019 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2020 		(void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
2021 		    ha->vp_index, string);
2022 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2023 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2024 		    buf, (int)0xffffffff);
2025 	}
2026 
2027 	/*
2028 	 * Get adapter instance parameter if a vp specific one isn't found.
2029 	 */
2030 	if (data == 0xffffffff) {
2031 		(void) sprintf(buf, "hba%d-%s", ha->instance, string);
2032 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2033 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
2034 		    0, buf, (int)0xffffffff);
2035 	}
2036 
2037 	/* Adapter instance parameter found? */
2038 	if (data == 0xffffffff) {
2039 		/* No, get default parameter. */
2040 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2041 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2042 		    string, (int)0xffffffff);
2043 	}
2044 
2045 	return (data);
2046 }
2047 
2048 /*
2049  * ql_check_isp_firmware
2050  *	Checks if using already loaded RISC code or drivers copy.
2051  *	If using already loaded code, save a copy of it.
2052  *
2053  * Input:
2054  *	ha = adapter state pointer.
2055  *
2056  * Returns:
2057  *	ql local function return status code.
2058  *
2059  * Context:
2060  *	Kernel context.
2061  */
2062 static int
2063 ql_check_isp_firmware(ql_adapter_state_t *ha)
2064 {
2065 	int		rval;
2066 	uint16_t	word_count;
2067 	uint32_t	byte_count;
2068 	uint32_t	fw_size, *lptr;
2069 	caddr_t		bufp;
2070 	uint16_t	risc_address = (uint16_t)ha->risc_fw[0].addr;
2071 
2072 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2073 
2074 	if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
2075 		if (ha->risc_code != NULL) {
2076 			kmem_free(ha->risc_code, ha->risc_code_size);
2077 			ha->risc_code = NULL;
2078 			ha->risc_code_size = 0;
2079 		}
2080 
2081 		/* Get RISC code length. */
2082 		rval = ql_rd_risc_ram(ha, risc_address + 3, ha->request_dvma,
2083 		    1);
2084 		if (rval == QL_SUCCESS) {
2085 			lptr = (uint32_t *)ha->request_ring_bp;
2086 			fw_size = *lptr << 1;
2087 
2088 			if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
2089 				ha->risc_code_size = fw_size;
2090 				ha->risc_code = bufp;
2091 				ha->fw_transfer_size = 128;
2092 
2093 				/* Dump RISC code. */
2094 				do {
2095 					if (fw_size > ha->fw_transfer_size) {
2096 						byte_count =
2097 						    ha->fw_transfer_size;
2098 					} else {
2099 						byte_count = fw_size;
2100 					}
2101 
2102 					word_count =
2103 					    (uint16_t)(byte_count >> 1);
2104 
2105 					rval = ql_rd_risc_ram(ha, risc_address,
2106 					    ha->request_dvma, word_count);
2107 					if (rval != QL_SUCCESS) {
2108 						kmem_free(ha->risc_code,
2109 						    ha->risc_code_size);
2110 						ha->risc_code = NULL;
2111 						ha->risc_code_size = 0;
2112 						break;
2113 					}
2114 
2115 					(void) ddi_dma_sync(
2116 					    ha->hba_buf.dma_handle,
2117 					    REQUEST_Q_BUFFER_OFFSET,
2118 					    byte_count,
2119 					    DDI_DMA_SYNC_FORKERNEL);
2120 					ddi_rep_get16(ha->hba_buf.acc_handle,
2121 					    (uint16_t *)bufp,
2122 					    (uint16_t *)ha->request_ring_bp,
2123 					    word_count, DDI_DEV_AUTOINCR);
2124 
2125 					risc_address += word_count;
2126 					fw_size -= byte_count;
2127 					bufp	+= byte_count;
2128 				} while (fw_size != 0);
2129 			}
2130 		}
2131 	} else {
2132 		rval = QL_FUNCTION_FAILED;
2133 	}
2134 
2135 	if (rval != QL_SUCCESS) {
2136 		EL(ha, "Load RISC code\n");
2137 	} else {
2138 		/*EMPTY*/
2139 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2140 	}
2141 	return (rval);
2142 }
2143 
2144 /*
 * ql_chip_diag
2146  *	Test chip for proper operation.
2147  *
2148  * Input:
2149  *	ha = adapter state pointer.
2150  *
2151  * Returns:
2152  *	ql local function return status code.
2153  *
2154  * Context:
2155  *	Kernel context.
2156  */
2157 static int
2158 ql_chip_diag(ql_adapter_state_t *ha)
2159 {
2160 	ql_mbx_data_t	mr;
2161 	int32_t		rval = QL_FUNCTION_FAILED;
2162 	int32_t		retries = 4;
2163 	uint16_t	id;
2164 
2165 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2166 
2167 	do {
2168 		/* Reset ISP chip. */
2169 		TASK_DAEMON_LOCK(ha);
2170 		ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
2171 		TASK_DAEMON_UNLOCK(ha);
2172 		ql_reset_chip(ha);
2173 
2174 		/* For ISP2200A reduce firmware load size. */
2175 		if (CFG_IST(ha, CFG_CTRL_2200) &&
2176 		    RD16_IO_REG(ha, mailbox[7]) == 4) {
2177 			ha->fw_transfer_size = 128;
2178 		} else {
2179 			ha->fw_transfer_size = REQUEST_QUEUE_SIZE;
2180 		}
2181 
2182 		/* Check product ID of chip */
2183 		mr.mb[1] = RD16_IO_REG(ha, mailbox[1]);
2184 		mr.mb[2] = RD16_IO_REG(ha, mailbox[2]);
2185 		mr.mb[3] = RD16_IO_REG(ha, mailbox[3]);
2186 
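		/*
		 * Normalize alternate PCI device IDs to their base product
		 * IDs for the product ID check below.
		 */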
2187 		if (ha->device_id == 0x5432 || ha->device_id == 0x8432) {
2188 			id = 0x2432;
2189 		} else if (ha->device_id == 0x5422 ||
2190 		    ha->device_id == 0x8422) {
2191 			id = 0x2422;
2192 		} else {
2193 			id = ha->device_id;
2194 		}
2195 
2196 		if (mr.mb[1] == PROD_ID_1 &&
2197 		    (mr.mb[2] == PROD_ID_2 || mr.mb[2] == PROD_ID_2a) &&
2198 		    (mr.mb[3] == PROD_ID_3 || mr.mb[3] == id)) {
2199 
2200 			ha->adapter_stats->revlvl.isp2200 = RD16_IO_REG(ha,
2201 			    mailbox[4]);
2202 			ha->adapter_stats->revlvl.risc = RD16_IO_REG(ha,
2203 			    mailbox[5]);
2204 			ha->adapter_stats->revlvl.frmbfr = RD16_IO_REG(ha,
2205 			    mailbox[6]);
2206 			ha->adapter_stats->revlvl.riscrom = RD16_IO_REG(ha,
2207 			    mailbox[7]);
2208 			bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv,
2209 			    strlen(QL_VERSION));
2210 
2211 			/* Wrap Incoming Mailboxes Test. */
2212 			mr.mb[1] = 0xAAAA;
2213 			mr.mb[2] = 0x5555;
2214 			mr.mb[3] = 0xAA55;
2215 			mr.mb[4] = 0x55AA;
2216 			mr.mb[5] = 0xA5A5;
2217 			mr.mb[6] = 0x5A5A;
2218 			mr.mb[7] = 0x2525;
2219 			rval = ql_mbx_wrap_test(ha, &mr);
2220 			if (rval == QL_SUCCESS) {
2221 				if (mr.mb[1] != 0xAAAA ||
2222 				    mr.mb[2] != 0x5555 ||
2223 				    mr.mb[3] != 0xAA55 ||
2224 				    mr.mb[4] != 0x55AA ||
2225 				    mr.mb[5] != 0xA5A5 ||
2226 				    mr.mb[6] != 0x5A5A ||
2227 				    mr.mb[7] != 0x2525) {
2228 					rval = QL_FUNCTION_FAILED;
2229 					(void) ql_flash_errlog(ha,
2230 					    FLASH_ERRLOG_ISP_ERR, 0,
2231 					    RD16_IO_REG(ha, hccr),
2232 					    RD16_IO_REG(ha, istatus));
2233 				}
2234 			} else {
2235 				cmn_err(CE_WARN, "%s(%d) - reg test failed="
2236 				    "%xh!", QL_NAME, ha->instance, rval);
2237 			}
2238 		} else {
2239 			cmn_err(CE_WARN, "%s(%d) - prod id failed!, mb1=%xh, "
2240 			    "mb2=%xh, mb3=%xh", QL_NAME, ha->instance,
2241 			    mr.mb[1], mr.mb[2], mr.mb[3]);
2242 		}
2243 	} while ((retries-- != 0) && (rval != QL_SUCCESS));
2244 
2245 	if (rval != QL_SUCCESS) {
2246 		EL(ha, "failed, rval = %xh\n", rval);
2247 	} else {
2248 		/*EMPTY*/
2249 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2250 	}
2251 	return (rval);
2252 }
2253 
2254 /*
2255  * ql_load_isp_firmware
2256  *	Load and start RISC firmware.
2257  *	Uses request ring for DMA buffer.
2258  *
2259  * Input:
2260  *	ha = adapter state pointer.
2261  *
2262  * Returns:
2263  *	ql local function return status code.
2264  *
2265  * Context:
2266  *	Kernel context.
2267  */
2268 int
2269 ql_load_isp_firmware(ql_adapter_state_t *vha)
2270 {
2271 	caddr_t			risc_code_address;
2272 	uint32_t		risc_address, risc_code_size;
	int			rval = QL_SUCCESS;	/* no-segment case */
2274 	uint32_t		word_count, cnt;
2275 	size_t			byte_count;
2276 	ql_adapter_state_t	*ha = vha->pha;
2277 
2278 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
2279 		ql_mps_reset(ha);
2280 	}
2281 
2282 	if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) {
2283 		return (ql_load_flash_fw(ha));
2284 	}
2285 
2286 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2287 
2288 	/* Load firmware segments */
2289 	for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS &&
2290 	    ha->risc_fw[cnt].code != NULL; cnt++) {
2291 
2292 		risc_code_address = ha->risc_fw[cnt].code;
2293 		risc_address = ha->risc_fw[cnt].addr;
2294 		risc_code_size = ha->risc_fw[cnt].length;
2295 
2296 		while (risc_code_size) {
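			/*
			 * ISP24xx-family adapters load firmware as 32-bit
			 * words; earlier ISPs load 16-bit words.
			 */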
2297 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2298 				word_count = ha->fw_transfer_size >> 2;
2299 				if (word_count > risc_code_size) {
2300 					word_count = risc_code_size;
2301 				}
2302 				byte_count = word_count << 2;
2303 
2304 				ddi_rep_put32(ha->hba_buf.acc_handle,
2305 				    (uint32_t *)risc_code_address,
2306 				    (uint32_t *)ha->request_ring_bp,
2307 				    word_count, DDI_DEV_AUTOINCR);
2308 			} else {
2309 				word_count = ha->fw_transfer_size >> 1;
2310 				if (word_count > risc_code_size) {
2311 					word_count = risc_code_size;
2312 				}
2313 				byte_count = word_count << 1;
2314 
2315 				ddi_rep_put16(ha->hba_buf.acc_handle,
2316 				    (uint16_t *)risc_code_address,
2317 				    (uint16_t *)ha->request_ring_bp,
2318 				    word_count, DDI_DEV_AUTOINCR);
2319 			}
2320 
2321 			(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2322 			    REQUEST_Q_BUFFER_OFFSET, byte_count,
2323 			    DDI_DMA_SYNC_FORDEV);
2324 
2325 			rval = ql_wrt_risc_ram(ha, risc_address,
2326 			    ha->request_dvma, word_count);
2327 			if (rval != QL_SUCCESS) {
2328 				EL(ha, "failed, load=%xh\n", rval);
2329 				cnt = MAX_RISC_CODE_SEGMENTS;
2330 				break;
2331 			}
2332 
2333 			risc_address += word_count;
2334 			risc_code_size -= word_count;
2335 			risc_code_address += byte_count;
2336 		}
2337 	}
2338 
2339 	/* Start firmware. */
2340 	if (rval == QL_SUCCESS) {
2341 		rval = ql_start_firmware(ha);
2342 	}
2343 
2344 	if (rval != QL_SUCCESS) {
2345 		EL(ha, "failed, rval = %xh\n", rval);
2346 	} else {
2347 		/*EMPTY*/
2348 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2349 	}
2350 
2351 	return (rval);
2352 }
2353 
2354 /*
2355  * ql_load_flash_fw
2356  *	Gets ISP24xx firmware from flash and loads ISP.
2357  *
2358  * Input:
2359  *	ha:	adapter state pointer.
2360  *
2361  * Returns:
2362  *	ql local function return status code.
2363  */
2364 static int
2365 ql_load_flash_fw(ql_adapter_state_t *ha)
2366 {
2367 	int		rval;
2368 	uint8_t		seg_cnt;
2369 	uint32_t	risc_address, xfer_size, count,	*bp, faddr;
2370 	uint32_t	risc_code_size = 0;
2371 
2372 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2373 
2374 	faddr = ha->flash_data_addr | ha->flash_fw_addr;
2375 
2376 	for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) {
2377 		xfer_size = ha->fw_transfer_size >> 2;
2378 		do {
2379 			GLOBAL_HW_LOCK();
2380 
2381 			/* Read data from flash. */
2382 			bp = (uint32_t *)ha->request_ring_bp;
2383 			for (count = 0; count < xfer_size; count++) {
2384 				rval = ql_24xx_read_flash(ha, faddr++, bp);
2385 				if (rval != QL_SUCCESS) {
2386 					break;
2387 				}
2388 				ql_chg_endian((uint8_t *)bp++, 4);
2389 			}
2390 
2391 			GLOBAL_HW_UNLOCK();
2392 
2393 			if (rval != QL_SUCCESS) {
2394 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
2395 				break;
2396 			}
2397 
2398 			if (risc_code_size == 0) {
2399 				bp = (uint32_t *)ha->request_ring_bp;
2400 				risc_address = bp[2];
2401 				risc_code_size = bp[3];
2402 				ha->risc_fw[seg_cnt].addr = risc_address;
2403 			}
2404 
2405 			if (risc_code_size < xfer_size) {
2406 				faddr -= xfer_size - risc_code_size;
2407 				xfer_size = risc_code_size;
2408 			}
2409 
2410 			(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2411 			    REQUEST_Q_BUFFER_OFFSET, xfer_size << 2,
2412 			    DDI_DMA_SYNC_FORDEV);
2413 
2414 			rval = ql_wrt_risc_ram(ha, risc_address,
2415 			    ha->request_dvma, xfer_size);
2416 			if (rval != QL_SUCCESS) {
2417 				EL(ha, "ql_wrt_risc_ram failed=%xh\n", rval);
2418 				break;
2419 			}
2420 
2421 			risc_address += xfer_size;
2422 			risc_code_size -= xfer_size;
2423 		} while (risc_code_size);
2424 
2425 		if (rval != QL_SUCCESS) {
2426 			break;
2427 		}
2428 	}
2429 
2430 	/* Start firmware. */
2431 	if (rval == QL_SUCCESS) {
2432 		rval = ql_start_firmware(ha);
2433 	}
2434 
2435 	if (rval != QL_SUCCESS) {
2436 		EL(ha, "failed, rval = %xh\n", rval);
2437 	} else {
2438 		/*EMPTY*/
2439 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2440 	}
2441 	return (rval);
2442 }
2443 
2444 /*
2445  * ql_start_firmware
2446  *	Starts RISC code.
2447  *
2448  * Input:
2449  *	ha = adapter state pointer.
2450  *
2451  * Returns:
2452  *	ql local function return status code.
2453  *
2454  * Context:
2455  *	Kernel context.
2456  */
2457 int
2458 ql_start_firmware(ql_adapter_state_t *vha)
2459 {
2460 	int			rval, rval2;
2461 	uint32_t		data;
2462 	ql_mbx_data_t		mr;
2463 	ql_adapter_state_t	*ha = vha->pha;
2464 
2465 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2466 
2467 	/* Verify checksum of loaded RISC code. */
2468 	rval = ql_verify_checksum(ha);
2469 	if (rval == QL_SUCCESS) {
2470 		/* Start firmware execution. */
2471 		(void) ql_execute_fw(ha);
2472 
2473 		/* Save firmware version. */
2474 		(void) ql_get_fw_version(ha, &mr);
2475 		ha->fw_major_version = mr.mb[1];
2476 		ha->fw_minor_version = mr.mb[2];
2477 		ha->fw_subminor_version = mr.mb[3];
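		/*
		 * The combined value from mailboxes 4 and 5 appears to be
		 * the highest address of extended RISC memory; convert it
		 * to a byte count, assuming 32-bit words starting at
		 * address 0x100000.
		 */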
2478 		ha->fw_ext_memory_size = ((SHORT_TO_LONG(mr.mb[4], mr.mb[5]) -
2479 		    0x100000) + 1) * 4;
2480 		ha->fw_attributes = mr.mb[6];
2481 
2482 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
2483 			ha->phy_fw_major_version = LSB(mr.mb[8]);
2484 			ha->phy_fw_minor_version = MSB(mr.mb[9]);
2485 			ha->phy_fw_subminor_version = LSB(mr.mb[9]);
2486 			ha->mpi_fw_major_version = LSB(mr.mb[10]);
2487 			ha->mpi_fw_minor_version = MSB(mr.mb[11]);
2488 			ha->mpi_fw_subminor_version = LSB(mr.mb[11]);
2489 			ha->mpi_capability_list = SHORT_TO_LONG(mr.mb[13],
2490 			    mr.mb[12]);
2491 			if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE,
2492 			    0, 0, &data)) == QL_SUCCESS) {
2493 				ha->xioctl->fdesc.block_size = data << 2;
2494 				QL_PRINT_10(CE_CONT, "(%d): fdesc.block_size="
2495 				    "%xh\n", ha->instance,
2496 				    ha->xioctl->fdesc.block_size);
2497 			} else {
2498 				EL(ha, "flash_access status=%xh\n", rval2);
2499 			}
2500 		}
2501 
2502 		/* Set Serdes Transmit Parameters. */
2503 		if (CFG_IST(ha, CFG_CTRL_2422) && ha->serdes_param[0] & BIT_0) {
2504 			mr.mb[1] = ha->serdes_param[0];
2505 			mr.mb[2] = ha->serdes_param[1];
2506 			mr.mb[3] = ha->serdes_param[2];
2507 			mr.mb[4] = ha->serdes_param[3];
2508 			(void) ql_serdes_param(ha, &mr);
2509 		}
2510 	}
2511 
2512 	if (rval != QL_SUCCESS) {
2513 		ha->task_daemon_flags &= ~FIRMWARE_LOADED;
2514 		EL(ha, "failed, rval = %xh\n", rval);
2515 	} else {
2516 		ha->task_daemon_flags |= FIRMWARE_LOADED;
2517 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2518 	}
2519 	return (rval);
2520 }
2521 
2522 /*
2523  * ql_set_cache_line
2524  *	Sets PCI cache line parameter.
2525  *
2526  * Input:
2527  *	ha = adapter state pointer.
2528  *
2529  * Returns:
2530  *	ql local function return status code.
2531  *
2532  * Context:
2533  *	Kernel context.
2534  */
2535 int
2536 ql_set_cache_line(ql_adapter_state_t *ha)
2537 {
2538 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2539 
2540 	/* Set the cache line. */
2541 	if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) {
2542 		/* Set cache line register. */
2543 		ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1);
2544 	}
2545 
2546 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2547 
2548 	return (QL_SUCCESS);
2549 }
2550 
2551 /*
2552  * ql_init_rings
2553  *	Initializes firmware and ring pointers.
2554  *
2555  *	Beginning of response ring has initialization control block
2556  *	already built by nvram config routine.
2557  *
2558  * Input:
2559  *	ha = adapter state pointer.
2560  *	ha->hba_buf = request and response rings
2561  *	ha->init_ctrl_blk = initialization control block
2562  *
2563  * Returns:
2564  *	ql local function return status code.
2565  *
2566  * Context:
2567  *	Kernel context.
2568  */
2569 int
2570 ql_init_rings(ql_adapter_state_t *vha2)
2571 {
2572 	int			rval, rval2;
2573 	uint16_t		index;
2574 	ql_mbx_data_t		mr;
2575 	ql_adapter_state_t	*ha = vha2->pha;
2576 
2577 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2578 
2579 	/* Clear outstanding commands array. */
2580 	for (index = 0; index < MAX_OUTSTANDING_COMMANDS; index++) {
2581 		ha->outstanding_cmds[index] = NULL;
2582 	}
2583 	ha->osc_index = 1;
2584 
2585 	ha->pending_cmds.first = NULL;
2586 	ha->pending_cmds.last = NULL;
2587 
2588 	/* Initialize firmware. */
2589 	ha->request_ring_ptr = ha->request_ring_bp;
2590 	ha->req_ring_index = 0;
2591 	ha->req_q_cnt = REQUEST_ENTRY_CNT - 1;
2592 	ha->response_ring_ptr = ha->response_ring_bp;
2593 	ha->rsp_ring_index = 0;
2594 
2595 	if (ha->flags & VP_ENABLED) {
2596 		ql_adapter_state_t	*vha;
2597 		uint16_t		cnt;
2598 		uint32_t		max_vports;
2599 		ql_init_24xx_cb_t	*icb = &ha->init_ctrl_blk.cb24;
2600 
2601 		max_vports = (CFG_IST(ha, CFG_CTRL_2422) ?
2602 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS);
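		/*
		 * Zero the virtual port section of the init control block,
		 * from vp_count through the end of the structure.
		 */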
2603 		bzero(icb->vp_count,
2604 		    ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) -
2605 		    (uintptr_t)icb->vp_count);
2606 		icb->vp_count[0] = (uint8_t)max_vports;
2607 
2608 		/* Allow connection option 2. */
2609 		icb->global_vp_option[0] = BIT_1;
2610 
2611 		for (cnt = 0, vha = ha->vp_next; cnt < max_vports &&
2612 		    vha != NULL; vha = vha->vp_next, cnt++) {
2613 
2614 			index = (uint8_t)(vha->vp_index - 1);
2615 			bcopy(vha->loginparams.node_ww_name.raw_wwn,
2616 			    icb->vpc[index].node_name, 8);
2617 			bcopy(vha->loginparams.nport_ww_name.raw_wwn,
2618 			    icb->vpc[index].port_name, 8);
2619 
2620 			icb->vpc[index].options = VPO_TARGET_MODE_DISABLED |
2621 			    VPO_INITIATOR_MODE_ENABLED;
2622 			if (vha->flags & VP_ENABLED) {
2623 				icb->vpc[index].options = (uint8_t)
2624 				    (icb->vpc[index].options | VPO_ENABLED);
2625 			}
2626 		}
2627 	}
2628 
2629 	rval = ql_init_firmware(ha);
2630 
2631 	if (rval == QL_SUCCESS && (CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2632 		/* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */
2633 		rval = ql_get_firmware_option(ha, &mr);
2634 		if (rval == QL_SUCCESS) {
2635 			mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9);
2636 			mr.mb[2] = 0;
2637 			mr.mb[3] = BIT_10;
2638 			rval = ql_set_firmware_option(ha, &mr);
2639 		}
2640 	}
2641 
2642 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) {
2643 		/* Firmware Fibre Channel Event Trace Buffer */
2644 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE,
2645 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2646 			EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2);
2647 		} else {
2648 			if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf,
2649 			    FTO_FCE_TRACE_ENABLE)) != QL_SUCCESS) {
2650 				EL(ha, "fcetrace enable failed: %xh\n", rval2);
2651 				ql_free_phys(ha, &ha->fwfcetracebuf);
2652 			}
2653 		}
2654 	}
2655 
2656 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) {
2657 		/* Firmware Extended Trace Buffer */
2658 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE,
2659 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2660 			EL(ha, "exttrace buffer alloc failed: %xh\n", rval2);
2661 		} else {
2662 			if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf,
2663 			    FTO_EXT_TRACE_ENABLE)) != QL_SUCCESS) {
2664 				EL(ha, "exttrace enable failed: %xh\n", rval2);
2665 				ql_free_phys(ha, &ha->fwexttracebuf);
2666 			}
2667 		}
2668 	}
2669 
2670 	if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) {
2671 		ql_mbx_iocb_t	*pkt;
2672 		clock_t		timer;
2673 
2674 		/* Wait for firmware login of menlo. */
2675 		for (timer = 3000; timer; timer--) {
2676 			if (ha->flags & MENLO_LOGIN_OPERATIONAL) {
2677 				break;
2678 			}
2679 
2680 			if (!(ha->flags & INTERRUPTS_ENABLED) ||
2681 			    ddi_in_panic()) {
2682 				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
2683 					(void) ql_isr((caddr_t)ha);
2684 					INTR_LOCK(ha);
2685 					ha->intr_claimed = B_TRUE;
2686 					INTR_UNLOCK(ha);
2687 				}
2688 			}
2689 
2690 			/* Delay for 1 tick (10 milliseconds). */
2691 			ql_delay(ha, 10000);
2692 		}
2693 
2694 		if (timer == 0) {
2695 			rval = QL_FUNCTION_TIMEOUT;
2696 		} else {
2697 			pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
2698 			if (pkt == NULL) {
2699 				EL(ha, "failed, kmem_zalloc\n");
2700 				rval = QL_MEMORY_ALLOC_FAILED;
2701 			} else {
2702 				pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
2703 				pkt->mvfy.entry_count = 1;
2704 				pkt->mvfy.options_status =
2705 				    LE_16(VMF_DO_NOT_UPDATE_FW);
2706 
2707 				rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
2708 				    sizeof (ql_mbx_iocb_t));
2709 				LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
2710 				LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
2711 
2712 				if (rval != QL_SUCCESS ||
2713 				    (pkt->mvfy.entry_status & 0x3c) != 0 ||
2714 				    pkt->mvfy.options_status != CS_COMPLETE) {
2715 					EL(ha, "failed, status=%xh, es=%xh, "
2716 					    "cs=%xh, fc=%xh\n", rval,
2717 					    pkt->mvfy.entry_status & 0x3c,
2718 					    pkt->mvfy.options_status,
2719 					    pkt->mvfy.failure_code);
2720 					if (rval == QL_SUCCESS) {
2721 						rval = QL_FUNCTION_FAILED;
2722 					}
2723 				}
2724 
2725 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
2726 			}
2727 		}
2728 	}
2729 
2730 	if (rval != QL_SUCCESS) {
2731 		TASK_DAEMON_LOCK(ha);
2732 		ha->task_daemon_flags &= ~FIRMWARE_UP;
2733 		TASK_DAEMON_UNLOCK(ha);
2734 		EL(ha, "failed, rval = %xh\n", rval);
2735 	} else {
2736 		TASK_DAEMON_LOCK(ha);
2737 		ha->task_daemon_flags |= FIRMWARE_UP;
2738 		TASK_DAEMON_UNLOCK(ha);
2739 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2740 	}
2741 	return (rval);
2742 }
2743 
2744 /*
2745  * ql_fw_ready
2746  *	Waits for firmware ready. If firmware becomes ready
2747  *	device queues and RISC code are synchronized.
2748  *
2749  * Input:
2750  *	ha = adapter state pointer.
2751  *	secs = max wait time, in seconds (0-255).
2752  *
2753  * Returns:
2754  *	ql local function return status code.
2755  *
2756  * Context:
2757  *	Kernel context.
2758  */
2759 int
2760 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs)
2761 {
2762 	ql_mbx_data_t	mr;
2763 	clock_t		timer;
2764 	clock_t		dly = 250000;
2765 	clock_t		sec_delay = MICROSEC / dly;
2766 	clock_t		wait = secs * sec_delay;
2767 	int		rval = QL_FUNCTION_FAILED;
2768 	uint16_t	state = 0xffff;
2769 
2770 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2771 
2772 	timer = ha->r_a_tov < secs ? secs : ha->r_a_tov;
2773 	timer = (timer + 2) * sec_delay;
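	/*
	 * Poll the firmware state every 250 ms.  The wait budget is
	 * 'secs' seconds of polls; the overall timer allows the larger of
	 * 'secs' and R_A_TOV, plus 2 seconds.  Every fourth poll sleeps
	 * via delay(); the others busy-wait.
	 */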
2774 
2775 	/* Wait for ISP to finish LIP */
2776 	while (timer != 0 && wait != 0 &&
2777 	    !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
2778 
2779 		rval = ql_get_firmware_state(ha, &mr);
2780 		if (rval == QL_SUCCESS) {
2781 			if (ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2782 			    LOOP_DOWN)) {
2783 				wait--;
2784 			} else if (mr.mb[1] != FSTATE_READY) {
2785 				if (mr.mb[1] != FSTATE_WAIT_LOGIN) {
2786 					wait--;
2787 				}
2788 				rval = QL_FUNCTION_FAILED;
2789 			} else {
2790 				/* Firmware is ready. Get 2 * R_A_TOV. */
2791 				rval = ql_get_timeout_parameters(ha,
2792 				    &ha->r_a_tov);
2793 				if (rval != QL_SUCCESS) {
2794 					EL(ha, "failed, get_timeout_param"
2795 					    "=%xh\n", rval);
2796 				}
2797 
2798 				/* Configure loop. */
2799 				rval = ql_configure_loop(ha);
2800 				(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
2801 
2802 				if (ha->task_daemon_flags &
2803 				    LOOP_RESYNC_NEEDED) {
2804 					wait--;
2805 					EL(ha, "loop trans; tdf=%xh\n",
2806 					    ha->task_daemon_flags);
2807 				} else {
2808 					break;
2809 				}
2810 			}
2811 		} else {
2812 			wait--;
2813 		}
2814 
2815 		if (state != mr.mb[1]) {
2816 			EL(ha, "mailbox_reg[1] = %xh\n", mr.mb[1]);
2817 			state = mr.mb[1];
2818 		}
2819 
2820 		/* Delay for a tick if waiting. */
2821 		if (timer-- != 0 && wait != 0) {
2822 			if (timer % 4 == 0) {
2823 				delay(drv_usectohz(dly));
2824 			} else {
2825 				drv_usecwait(dly);
2826 			}
2827 		} else {
2828 			rval = QL_FUNCTION_TIMEOUT;
2829 		}
2830 	}
2831 
2832 	if (rval != QL_SUCCESS) {
2833 		EL(ha, "failed, rval = %xh\n", rval);
2834 	} else {
2835 		/*EMPTY*/
2836 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2837 	}
2838 	return (rval);
2839 }
2840 
2841 /*
2842  * ql_configure_loop
2843  *	Setup configurations based on loop.
2844  *
2845  * Input:
2846  *	ha = adapter state pointer.
2847  *
2848  * Returns:
2849  *	ql local function return status code.
2850  *
2851  * Context:
2852  *	Kernel context.
2853  */
2854 static int
2855 ql_configure_loop(ql_adapter_state_t *ha)
2856 {
2857 	int			rval;
2858 	ql_adapter_state_t	*vha;
2859 
2860 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2861 
2862 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2863 		TASK_DAEMON_LOCK(ha);
2864 		if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) &&
2865 		    vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2866 			TASK_DAEMON_UNLOCK(ha);
2867 			continue;
2868 		}
2869 		vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
2870 		TASK_DAEMON_UNLOCK(ha);
2871 
2872 		rval = ql_configure_hba(vha);
2873 		if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2874 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2875 			rval = ql_configure_device_d_id(vha);
2876 			if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2877 			    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2878 				(void) ql_configure_fabric(vha);
2879 			}
2880 		}
2881 	}
2882 
2883 	if (rval != QL_SUCCESS) {
2884 		EL(ha, "failed, rval = %xh\n", rval);
2885 	} else {
2886 		/*EMPTY*/
2887 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2888 	}
2889 	return (rval);
2890 }
2891 
2892 /*
2893  * ql_configure_n_port_info
2894  *	Setup configurations based on N port 2 N port topology.
2895  *
2896  * Input:
2897  *	ha = adapter state pointer.
2898  *
2902  * Context:
2903  *	Kernel context.
2904  */
2905 static void
2906 ql_configure_n_port_info(ql_adapter_state_t *ha)
2907 {
2908 	ql_tgt_t	tmp_tq;
2909 	ql_tgt_t	*tq;
2910 	uint8_t		*cb_port_name;
2911 	ql_link_t	*link;
2912 	int		index, rval;
2913 
2914 	tq = &tmp_tq;
2915 
2916 	/* Free existing target queues. */
2917 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
2918 		link = ha->dev[index].first;
2919 		while (link != NULL) {
2920 			tq = link->base_address;
2921 			link = link->next;
2922 			ql_remove_link(&ha->dev[index], &tq->device);
2923 			ql_dev_free(ha, tq);
2924 		}
2925 	}
2926 
2927 	/*
	 * If the N_Port's WWPN is larger than ours, then it has the
	 * N_Port login initiative.  It will have determined that and
	 * logged in with the firmware.  This results in a device
	 * database entry.  In this situation we will later send up a PLOGI
	 * by proxy for the N_Port to get things going.
	 *
	 * If the N_Port's WWPN is smaller, then the firmware has the
	 * N_Port login initiative and does a FLOGI in order to obtain the
	 * N_Port's WWNN and WWPN.  These names are required later
	 * during Leadville's FLOGI.  No PLOGI is done by the firmware in
	 * anticipation of a PLOGI via the driver from the upper layers.
	 * Upon receipt of said PLOGI the driver issues an ELS PLOGI
	 * pass-through command; the firmware assumes the s_id
	 * and the N_Port assumes the d_id, and Bob's your uncle.
2942 	 */
2943 
2944 	/*
2945 	 * In N port 2 N port topology the FW provides a port database entry at
	 * loop_id 0x7fe, which allows us to acquire the N_Port's WWPN.
2947 	 */
2948 	tq->d_id.b.al_pa = 0;
2949 	tq->d_id.b.area = 0;
2950 	tq->d_id.b.domain = 0;
2951 	tq->loop_id = 0x7fe;
2952 
2953 	rval = ql_get_port_database(ha, tq, PDF_NONE);
2954 	if (rval == QL_SUCCESS || rval == QL_NOT_LOGGED_IN) {
2955 		ql_dev_id_list_t	*list;
2956 		uint32_t		list_size;
2957 		ql_mbx_data_t		mr;
2958 		port_id_t		d_id = {0, 0, 0, 0};
2959 		uint16_t		loop_id = 0;
2960 
2961 		cb_port_name = (uint8_t *)(CFG_IST(ha, CFG_CTRL_242581) ?
2962 		    &ha->init_ctrl_blk.cb24.port_name[0] :
2963 		    &ha->init_ctrl_blk.cb.port_name[0]);
2964 
2965 		if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
2966 		    (la_wwn_t *)cb_port_name) == 1)) {
2967 			EL(ha, "target port has N_Port login initiative\n");
2968 		} else {
2969 			EL(ha, "host port has N_Port login initiative\n");
2970 		}
2971 
		/* Capture the N_Port's WWNN and WWPN. */
2973 
2974 		bcopy((void *)&tq->port_name[0],
2975 		    (void *)&ha->n_port->port_name[0], 8);
2976 		bcopy((void *)&tq->node_name[0],
2977 		    (void *)&ha->n_port->node_name[0], 8);
2978 
2979 		/* Resolve an n_port_handle */
2980 		ha->n_port->n_port_handle = 0x7fe;
2981 
2982 		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
2983 		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
2984 
2985 		if (list != NULL &&
2986 		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
2987 		    QL_SUCCESS) {
2988 			if (mr.mb[1]) {
2989 				EL(ha, "id list entries = %d\n", mr.mb[1]);
2990 				for (index = 0; index < mr.mb[1]; index++) {
2991 					ql_dev_list(ha, list, index,
2992 					    &d_id, &loop_id);
2993 					ha->n_port->n_port_handle = loop_id;
2994 				}
2995 			} else {
2996 				for (index = 0; index <= LAST_LOCAL_LOOP_ID;
2997 				    index++) {
					/* reuse tq */
2999 					tq->loop_id = (uint16_t)index;
3000 					rval = ql_get_port_database(ha, tq,
3001 					    PDF_NONE);
3002 					if (rval == QL_NOT_LOGGED_IN) {
3003 						if (tq->master_state ==
3004 						    PD_STATE_PLOGI_PENDING) {
3005 							ha->n_port->
3006 							    n_port_handle =
3007 							    tq->loop_id;
3008 							break;
3009 						}
3010 					} else {
3011 						ha->n_port->n_port_handle =
3012 						    tq->loop_id;
3013 						break;
3014 					}
3015 				}
3016 			}
3017 		} else {
3018 			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
3019 			    QL_NAME, ha->instance, d_id.b24);
3020 		}
3021 		if (list != NULL) {
3022 			kmem_free(list, list_size);
3023 		}
3024 	}
3025 }
3026 
3027 
3028 /*
3029  * ql_configure_hba
3030  *	Setup adapter context.
3031  *
3032  * Input:
3033  *	ha = adapter state pointer.
3034  *
3035  * Returns:
3036  *	ql local function return status code.
3037  *
3038  * Context:
3039  *	Kernel context.
3040  */
3041 static int
3042 ql_configure_hba(ql_adapter_state_t *ha)
3043 {
3044 	uint8_t		*bp;
3045 	int		rval;
3046 	uint32_t	state;
3047 	ql_mbx_data_t	mr;
3048 
3049 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3050 
3051 	/* Get host addresses. */
3052 	rval = ql_get_adapter_id(ha, &mr);
3053 	if (rval == QL_SUCCESS) {
3054 		ha->topology = (uint8_t)(ha->topology &
3055 		    ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));
3056 
3057 		/* Save Host d_id, alpa, loop ID. */
3058 		ha->loop_id = mr.mb[1];
3059 		ha->d_id.b.al_pa = LSB(mr.mb[2]);
3060 		ha->d_id.b.area = MSB(mr.mb[2]);
3061 		ha->d_id.b.domain = LSB(mr.mb[3]);
3062 
3063 		ADAPTER_STATE_LOCK(ha);
3064 		ha->flags &= ~FDISC_ENABLED;
3065 
3066 		/* Get loop topology. */
3067 		switch (mr.mb[6]) {
3068 		case CNX_LOOP_NO_FABRIC:
3069 			ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
3070 			break;
3071 		case CNX_FLPORT_IN_LOOP:
3072 			ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
3073 			break;
3074 		case CNX_NPORT_2_NPORT_P2P:
3075 		case CNX_NPORT_2_NPORT_NO_TGT_RSP:
3076 			ha->flags |= POINT_TO_POINT;
3077 			ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
3078 			if (CFG_IST(ha, CFG_CTRL_2425)) {
3079 				ql_configure_n_port_info(ha);
3080 			}
3081 			break;
3082 		case CNX_FLPORT_P2P:
3083 			ha->flags |= POINT_TO_POINT;
3084 			ha->topology = (uint8_t)(ha->topology | QL_F_PORT);
3085 
3086 			/* Get supported option. */
3087 			if (CFG_IST(ha, CFG_CTRL_242581) &&
3088 			    mr.mb[7] & GID_FP_NPIV_SUPPORT) {
3089 				ha->flags |= FDISC_ENABLED;
3090 			}
3091 			/* Get VLAN ID, mac address */
3092 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
3093 				ha->fabric_params = mr.mb[7];
3094 				ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
3095 				ha->fcoe_fcf_idx = mr.mb[10];
3096 				ha->fcoe_vnport_mac[0] = MSB(mr.mb[11]);
3097 				ha->fcoe_vnport_mac[1] = LSB(mr.mb[11]);
3098 				ha->fcoe_vnport_mac[2] = MSB(mr.mb[12]);
3099 				ha->fcoe_vnport_mac[3] = LSB(mr.mb[12]);
3100 				ha->fcoe_vnport_mac[4] = MSB(mr.mb[13]);
3101 				ha->fcoe_vnport_mac[5] = LSB(mr.mb[13]);
3102 			}
3103 			break;
3104 		default:
3105 			QL_PRINT_2(CE_CONT, "(%d,%d): UNKNOWN topology=%xh, "
3106 			    "d_id=%xh\n", ha->instance, ha->vp_index, mr.mb[6],
3107 			    ha->d_id.b24);
3108 			rval = QL_FUNCTION_FAILED;
3109 			break;
3110 		}
3111 		ADAPTER_STATE_UNLOCK(ha);
3112 
3113 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
3114 		    CFG_CTRL_242581))) {
3115 			mr.mb[1] = 0;
3116 			mr.mb[2] = 0;
3117 			rval = ql_data_rate(ha, &mr);
3118 			if (rval != QL_SUCCESS) {
3119 				EL(ha, "data_rate status=%xh\n", rval);
3120 				state = FC_STATE_FULL_SPEED;
3121 			} else {
3122 				if (mr.mb[1] == IIDMA_RATE_1GB) {
3123 					state = FC_STATE_1GBIT_SPEED;
3124 				} else if (mr.mb[1] == IIDMA_RATE_2GB) {
3125 					state = FC_STATE_2GBIT_SPEED;
3126 				} else if (mr.mb[1] == IIDMA_RATE_4GB) {
3127 					state = FC_STATE_4GBIT_SPEED;
3128 				} else if (mr.mb[1] == IIDMA_RATE_8GB) {
3129 					state = FC_STATE_8GBIT_SPEED;
3130 				} else if (mr.mb[1] == IIDMA_RATE_10GB) {
3131 					state = FC_STATE_10GBIT_SPEED;
3132 				} else {
3133 					state = 0;
3134 				}
3135 			}
3136 		} else {
3137 			state = FC_STATE_FULL_SPEED;
3138 		}
3139 		ha->state = FC_PORT_STATE_MASK(ha->state) | state;
3140 	} else if (rval == MBS_COMMAND_ERROR) {
3141 		EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
3142 		    rval, mr.mb[1]);
3143 	}
3144 
3145 	if (rval != QL_SUCCESS) {
3146 		EL(ha, "failed, rval = %xh\n", rval);
3147 	} else {
3148 		bp = ha->loginparams.nport_ww_name.raw_wwn;
3149 		EL(ha, "topology=%xh, d_id=%xh, "
3150 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
3151 		    ha->topology, ha->d_id.b24, bp[0], bp[1],
3152 		    bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
3153 	}
3154 	return (rval);
3155 }
3156 
3157 /*
3158  * ql_configure_device_d_id
3159  *	Updates device loop ID.
3160  *	Also adds to device queue any new devices found on private loop.
3161  *
3162  * Input:
3163  *	ha = adapter state pointer.
3164  *
3165  * Returns:
3166  *	ql local function return status code.
3167  *
3168  * Context:
3169  *	Kernel context.
3170  */
3171 static int
3172 ql_configure_device_d_id(ql_adapter_state_t *ha)
3173 {
3174 	port_id_t		d_id;
3175 	ql_link_t		*link;
3176 	int			rval;
3177 	int			loop;
3178 	ql_tgt_t		*tq;
3179 	ql_dev_id_list_t	*list;
3180 	uint32_t		list_size;
3181 	uint16_t		index, loop_id;
3182 	ql_mbx_data_t		mr;
3183 	uint8_t			retries = MAX_DEVICE_LOST_RETRY;
3184 
3185 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3186 
3187 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3188 	list = kmem_zalloc(list_size, KM_SLEEP);
3189 	if (list == NULL) {
3190 		rval = QL_MEMORY_ALLOC_FAILED;
3191 		EL(ha, "failed, rval = %xh\n", rval);
3192 		return (rval);
3193 	}
3194 
3195 	do {
3196 		/*
3197 		 * Get data from RISC code d_id list to init each device queue.
3198 		 */
3199 		rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
3200 		if (rval != QL_SUCCESS) {
3201 			kmem_free(list, list_size);
3202 			EL(ha, "failed, rval = %xh\n", rval);
3203 			return (rval);
3204 		}
3205 
3206 		/* Acquire adapter state lock. */
3207 		ADAPTER_STATE_LOCK(ha);
3208 
3209 		/* Mark all queues as unusable. */
3210 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3211 			for (link = ha->dev[index].first; link != NULL;
3212 			    link = link->next) {
3213 				tq = link->base_address;
3214 				DEVICE_QUEUE_LOCK(tq);
3215 				if (!(tq->flags & TQF_PLOGI_PROGRS) &&
3216 				    !(ha->topology & QL_N_PORT)) {
3217 					tq->loop_id = (uint16_t)
3218 					    (tq->loop_id | PORT_LOST_ID);
3219 				}
3220 				DEVICE_QUEUE_UNLOCK(tq);
3221 			}
3222 		}
3223 
3224 		/* If device not in queues add new queue. */
3225 		for (index = 0; index < mr.mb[1]; index++) {
3226 			ql_dev_list(ha, list, index, &d_id, &loop_id);
3227 
3228 			if (VALID_DEVICE_ID(ha, loop_id)) {
3229 				tq = ql_dev_init(ha, d_id, loop_id);
3230 				if (tq != NULL) {
3231 					tq->loop_id = loop_id;
3232 
3233 					/* Test for fabric device. */
3234 					if (d_id.b.domain !=
3235 					    ha->d_id.b.domain ||
3236 					    d_id.b.area != ha->d_id.b.area) {
3237 						tq->flags |= TQF_FABRIC_DEVICE;
3238 					}
3239 
3240 					ADAPTER_STATE_UNLOCK(ha);
3241 					if (ql_get_port_database(ha, tq,
3242 					    PDF_NONE) == QL_SUCCESS) {
3243 						ADAPTER_STATE_LOCK(ha);
3244 						tq->loop_id = (uint16_t)
3245 						    (tq->loop_id &
3246 						    ~PORT_LOST_ID);
3247 					} else {
3248 						ADAPTER_STATE_LOCK(ha);
3249 					}
3250 				}
3251 			}
3252 		}
3253 
3254 		/* 24xx does not report switch devices in ID list. */
3255 		if ((CFG_IST(ha, CFG_CTRL_242581)) &&
3256 		    ha->topology & (QL_F_PORT | QL_FL_PORT)) {
3257 			d_id.b24 = 0xfffffe;
3258 			tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
3259 			if (tq != NULL) {
3260 				tq->flags |= TQF_FABRIC_DEVICE;
3261 				ADAPTER_STATE_UNLOCK(ha);
3262 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3263 				ADAPTER_STATE_LOCK(ha);
3264 			}
3265 			d_id.b24 = 0xfffffc;
3266 			tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
3267 			if (tq != NULL) {
3268 				tq->flags |= TQF_FABRIC_DEVICE;
3269 				ADAPTER_STATE_UNLOCK(ha);
3270 				if (ha->vp_index != 0) {
3271 					(void) ql_login_fport(ha, tq,
3272 					    SNS_24XX_HDL, LFF_NONE, NULL);
3273 				}
3274 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3275 				ADAPTER_STATE_LOCK(ha);
3276 			}
3277 		}
3278 
3279 		/* If F_port exists, allocate queue for FL_Port. */
3280 		index = ql_alpa_to_index[0xfe];
3281 		d_id.b24 = 0;
3282 		if (ha->dev[index].first != NULL) {
3283 			tq = ql_dev_init(ha, d_id, (uint16_t)
3284 			    (CFG_IST(ha, CFG_CTRL_242581) ?
3285 			    FL_PORT_24XX_HDL : FL_PORT_LOOP_ID));
3286 			if (tq != NULL) {
3287 				tq->flags |= TQF_FABRIC_DEVICE;
3288 				ADAPTER_STATE_UNLOCK(ha);
3289 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3290 				ADAPTER_STATE_LOCK(ha);
3291 			}
3292 		}
3293 
3294 		/* Allocate queue for broadcast. */
3295 		d_id.b24 = 0xffffff;
3296 		(void) ql_dev_init(ha, d_id, (uint16_t)
3297 		    (CFG_IST(ha, CFG_CTRL_242581) ? BROADCAST_24XX_HDL :
3298 		    IP_BROADCAST_LOOP_ID));
3299 
3300 		/* Check for any devices lost. */
3301 		loop = FALSE;
3302 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3303 			for (link = ha->dev[index].first; link != NULL;
3304 			    link = link->next) {
3305 				tq = link->base_address;
3306 
3307 				if ((tq->loop_id & PORT_LOST_ID) &&
3308 				    !(tq->flags & (TQF_INITIATOR_DEVICE |
3309 				    TQF_FABRIC_DEVICE))) {
3310 					loop = TRUE;
3311 				}
3312 			}
3313 		}
3314 
3315 		/* Release adapter state lock. */
3316 		ADAPTER_STATE_UNLOCK(ha);
3317 
3318 		/* Give devices time to recover. */
3319 		if (loop == TRUE) {
3320 			drv_usecwait(1000000);
3321 		}
3322 	} while (retries-- && loop == TRUE &&
3323 	    !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));
3324 
3325 	kmem_free(list, list_size);
3326 
3327 	if (rval != QL_SUCCESS) {
3328 		EL(ha, "failed=%xh\n", rval);
3329 	} else {
3330 		/*EMPTY*/
3331 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3332 	}
3333 
3334 	return (rval);
3335 }
3336 
3337 /*
3338  * ql_dev_list
3339  *	Gets device d_id and loop ID from firmware device list.
3340  *
3341  * Input:
3342  *	ha:	adapter state pointer.
3343  *	list	device list pointer.
3344  *	index:	list index of device data.
3345  *	d_id:	pointer for d_id data.
3346  *	id:	pointer for loop ID.
3347  *
3348  * Context:
3349  *	Kernel context.
3350  */
3351 void
3352 ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
3353     uint32_t index, port_id_t *d_id, uint16_t *id)
3354 {
3355 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3356 		struct ql_24_dev_id	*list24 = (struct ql_24_dev_id *)list;
3357 
3358 		d_id->b.al_pa = list24[index].al_pa;
3359 		d_id->b.area = list24[index].area;
3360 		d_id->b.domain = list24[index].domain;
3361 		*id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
3362 		    list24[index].n_port_hdl_h);
3363 
3364 	} else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3365 		struct ql_ex_dev_id	*list23 = (struct ql_ex_dev_id *)list;
3366 
3367 		d_id->b.al_pa = list23[index].al_pa;
3368 		d_id->b.area = list23[index].area;
3369 		d_id->b.domain = list23[index].domain;
3370 		*id = CHAR_TO_SHORT(list23[index].loop_id_l,
3371 		    list23[index].loop_id_h);
3372 
3373 	} else {
3374 		struct ql_dev_id	*list22 = (struct ql_dev_id *)list;
3375 
3376 		d_id->b.al_pa = list22[index].al_pa;
3377 		d_id->b.area = list22[index].area;
3378 		d_id->b.domain = list22[index].domain;
3379 		*id = (uint16_t)list22[index].loop_id;
3380 	}
3381 }
3382 
3383 /*
3384  * ql_configure_fabric
3385  *	Setup fabric context.
3386  *
3387  * Input:
3388  *	ha = adapter state pointer.
3389  *
3390  * Returns:
3391  *	ql local function return status code.
3392  *
3393  * Context:
3394  *	Kernel context.
3395  */
3396 static int
3397 ql_configure_fabric(ql_adapter_state_t *ha)
3398 {
3399 	port_id_t	d_id;
3400 	ql_tgt_t	*tq;
3401 	int		rval = QL_FUNCTION_FAILED;
3402 
3403 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3404 
3405 	ha->topology = (uint8_t)(ha->topology & ~QL_SNS_CONNECTION);
3406 
3407 	/* Test switch fabric controller present. */
3408 	d_id.b24 = FS_FABRIC_F_PORT;
3409 	tq = ql_d_id_to_queue(ha, d_id);
3410 	if (tq != NULL) {
3411 		/* Get port/node names of F_Port. */
3412 		(void) ql_get_port_database(ha, tq, PDF_NONE);
3413 
3414 		d_id.b24 = FS_NAME_SERVER;
3415 		tq = ql_d_id_to_queue(ha, d_id);
3416 		if (tq != NULL) {
3417 			(void) ql_get_port_database(ha, tq, PDF_NONE);
3418 			ha->topology = (uint8_t)
3419 			    (ha->topology | QL_SNS_CONNECTION);
3420 			rval = QL_SUCCESS;
3421 		}
3422 	}
3423 
3424 	if (rval != QL_SUCCESS) {
3425 		EL(ha, "failed=%xh\n", rval);
3426 	} else {
3427 		/*EMPTY*/
3428 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3429 	}
3430 	return (rval);
3431 }
3432 
3433 /*
3434  * ql_reset_chip
3435  *	Reset ISP chip.
3436  *
3437  * Input:
3438  *	ha = adapter block pointer.
3439  *	All activity on chip must be already stopped.
3440  *	ADAPTER_STATE_LOCK must be released.
3441  *
3442  * Context:
3443  *	Interrupt or Kernel context, no mailbox commands allowed.
3444  */
3445 void
3446 ql_reset_chip(ql_adapter_state_t *vha)
3447 {
3448 	uint32_t		cnt;
3449 	uint16_t		cmd;
3450 	ql_adapter_state_t	*ha = vha->pha;
3451 
3452 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3453 
3454 	/*
	 * Accessing PCI space while not powered can cause panics
	 * on some platforms (e.g., Sun Blade 1000).
3457 	 */
3458 	if (ha->power_level == PM_LEVEL_D3) {
3459 		QL_PRINT_2(CE_CONT, "(%d): Low Power exit\n", ha->instance);
3460 		return;
3461 	}
3462 
3463 	/* Reset all outbound mailbox registers */
3464 	for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
3465 		WRT16_IO_REG(ha, mailbox[cnt], (uint16_t)0);
3466 	}
3467 
3468 	/* Disable ISP interrupts. */
3469 	WRT16_IO_REG(ha, ictrl, 0);
3470 	ADAPTER_STATE_LOCK(ha);
3471 	ha->flags &= ~INTERRUPTS_ENABLED;
3472 	ADAPTER_STATE_UNLOCK(ha);
3473 
3474 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3475 		RD32_IO_REG(ha, ictrl);
3476 		ql_reset_24xx_chip(ha);
3477 		QL_PRINT_3(CE_CONT, "(%d): 24xx exit\n", ha->instance);
3478 		return;
3479 	}
3480 
3481 	/*
3482 	 * We are going to reset the chip in case of 2300. That might cause
3483 	 * a PBM ERR if a DMA transaction is in progress. One way of
3484 	 * avoiding it is to disable Bus Master operation before we start
3485 	 * the reset activity.
3486 	 */
3487 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3488 	cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
3489 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3490 
3491 	/* Pause RISC. */
3492 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3493 	for (cnt = 0; cnt < 30000; cnt++) {
3494 		if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3495 			break;
3496 		}
3497 		drv_usecwait(MILLISEC);
3498 	}
3499 
3500 	/*
3501 	 * A call to ql_isr() can still happen through
	 * ql_mailbox_command().  So mark that we are (or will be)
	 * running from ROM code now.
3504 	 */
3505 	TASK_DAEMON_LOCK(ha);
3506 	ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
3507 	TASK_DAEMON_UNLOCK(ha);
3508 
3509 	/* Select FPM registers. */
3510 	WRT16_IO_REG(ha, ctrl_status, 0x20);
3511 
3512 	/* FPM Soft Reset. */
3513 	WRT16_IO_REG(ha, fpm_diag_config, 0x100);
3514 
3515 	/* Toggle FPM reset for 2300 */
3516 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3517 		WRT16_IO_REG(ha, fpm_diag_config, 0);
3518 	}
3519 
3520 	/* Select frame buffer registers. */
3521 	WRT16_IO_REG(ha, ctrl_status, 0x10);
3522 
3523 	/* Reset frame buffer FIFOs. */
3524 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3525 		WRT16_IO_REG(ha, fb_cmd, 0x00fc);
3526 		/* read back fb_cmd until zero or 3 seconds max */
3527 		for (cnt = 0; cnt < 300000; cnt++) {
3528 			if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
3529 				break;
3530 			}
3531 			drv_usecwait(10);
3532 		}
3533 	} else  {
3534 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
3535 	}
3536 
3537 	/* Select RISC module registers. */
3538 	WRT16_IO_REG(ha, ctrl_status, 0);
3539 
3540 	/* Reset RISC module. */
3541 	WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
3542 
3543 	/* Reset ISP semaphore. */
3544 	WRT16_IO_REG(ha, semaphore, 0);
3545 
3546 	/* Release RISC module. */
3547 	WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3548 
3549 	/* Ensure mailbox registers are free. */
3550 	WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
3551 	WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);
3552 
3553 	/* Clear the mailbox command pointer. */
3554 	ql_clear_mcp(ha);
3555 
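	/* Clear any stale mailbox state flags. */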
3556 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3557 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3558 
3559 	/* Bus Master is disabled so chip reset is safe. */
3560 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3561 		WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
3562 		drv_usecwait(MILLISEC);
3563 
3564 		/* Wait for reset to finish. */
3565 		for (cnt = 0; cnt < 30000; cnt++) {
3566 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3567 				break;
3568 			}
3569 			drv_usecwait(MILLISEC);
3570 		}
3571 	}
3572 
3573 	/* Wait for RISC to recover from reset. */
3574 	for (cnt = 0; cnt < 30000; cnt++) {
3575 		if (RD16_IO_REG(ha, mailbox[0]) != MBS_BUSY) {
3576 			break;
3577 		}
3578 		drv_usecwait(MILLISEC);
3579 	}
3580 
3581 	/* Restore Bus Master operation. */
3582 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3583 	cmd = (uint16_t)(cmd | PCI_COMM_ME);
3584 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3585 
3586 	/* Disable RISC pause on FPM parity error. */
3587 	WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
3588 
3589 	/* Initialize probe registers */
3590 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
3591 		/* Pause RISC. */
3592 		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3593 		for (cnt = 0; cnt < 30000; cnt++) {
3594 			if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3595 				break;
3596 			} else {
3597 				drv_usecwait(MILLISEC);
3598 			}
3599 		}
3600 
3601 		/* Select FPM registers. */
3602 		WRT16_IO_REG(ha, ctrl_status, 0x30);
3603 
3604 		/* Set probe register */
3605 		WRT16_IO_REG(ha, mailbox[23], 0x204c);
3606 
3607 		/* Select RISC module registers. */
3608 		WRT16_IO_REG(ha, ctrl_status, 0);
3609 
3610 		/* Release RISC module. */
3611 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3612 	}
3613 
3614 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3615 }
3616 
3617 /*
3618  * ql_reset_24xx_chip
3619  *	Reset ISP24xx chip.
3620  *
3621  * Input:
3622  *	ha = adapter block pointer.
3623  *	All activity on the chip must already be stopped.
3624  *
3625  * Context:
3626  *	Interrupt or Kernel context, no mailbox commands allowed.
3627  */
3628 void
3629 ql_reset_24xx_chip(ql_adapter_state_t *ha)
3630 {
3631 	uint32_t	timer, stat;
3632 
3633 	/* Shutdown DMA. */
3634 	WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);
3635 
3636 	/* Wait for DMA to stop. */
3637 	for (timer = 0; timer < 30000; timer++) {
3638 		if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
3639 			break;
3640 		}
3641 		drv_usecwait(100);
3642 	}
3643 
3644 	/* Stop the firmware. */
3645 	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3646 	WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
3647 	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
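	/* Wait up to 3 seconds for the firmware stop to complete. */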
3648 	for (timer = 0; timer < 30000; timer++) {
3649 		stat = RD32_IO_REG(ha, intr_info_lo);
3650 		if (stat & BIT_15) {
3651 			if ((stat & 0xff) < 0x12) {
3652 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3653 				break;
3654 			}
3655 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3656 		}
3657 		drv_usecwait(100);
3658 	}
3659 
3660 	/* Reset the chip. */
3661 	WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
3662 	    MWB_4096_BYTES);
3663 	drv_usecwait(100);
3664 
3665 	/* Wait for idle status from ROM firmware. */
3666 	for (timer = 0; timer < 30000; timer++) {
3667 		if (RD16_IO_REG(ha, mailbox[0]) == 0) {
3668 			break;
3669 		}
3670 		drv_usecwait(100);
3671 	}
3672 
3673 	/* Wait for reset to finish. */
3674 	for (timer = 0; timer < 30000; timer++) {
3675 		if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3676 			break;
3677 		}
3678 		drv_usecwait(100);
3679 	}
3680 
3681 	/* Clear the mailbox command pointer. */
3682 	ql_clear_mcp(ha);
3683 
3684 	/* Ensure mailbox registers are free. */
3685 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3686 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3687 
3688 	if (ha->flags & MPI_RESET_NEEDED) {
3689 		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3690 		WRT16_IO_REG(ha, mailbox[0], MBC_RESTART_MPI);
3691 		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
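		/* Wait up to 3 seconds for the MPI restart to complete. */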
3692 		for (timer = 0; timer < 30000; timer++) {
3693 			stat = RD32_IO_REG(ha, intr_info_lo);
3694 			if (stat & BIT_15) {
3695 				if ((stat & 0xff) < 0x12) {
3696 					WRT32_IO_REG(ha, hccr,
3697 					    HC24_CLR_RISC_INT);
3698 					break;
3699 				}
3700 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3701 			}
3702 			drv_usecwait(100);
3703 		}
3704 		ADAPTER_STATE_LOCK(ha);
3705 		ha->flags &= ~MPI_RESET_NEEDED;
3706 		ADAPTER_STATE_UNLOCK(ha);
3707 	}
3708 
3709 	/*
3710 	 * Set flash write-protection.
3711 	 */
3712 	if ((ha->flags & ONLINE) == 0) {
3713 		ql_24xx_protect_flash(ha);
3714 	}
3715 }
3716 
3717 /*
3718  * ql_clear_mcp
3719  *	Carefully clear the mailbox command pointer in the ha struct.
3720  *
3721  * Input:
3722  *	ha = adapter block pointer.
3723  *
3724  * Context:
3725  *	Interrupt or Kernel context, no mailbox commands allowed.
3726  */
3728 static void
3729 ql_clear_mcp(ql_adapter_state_t *ha)
3730 {
3731 	uint32_t cnt;
3732 
3733 	/* Don't null ha->mcp without the lock, but don't hang either. */
3734 	if (MBX_REGISTER_LOCK_OWNER(ha) == curthread) {
3735 		ha->mcp = NULL;
3736 	} else {
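		/* Poll up to 3 seconds (300000 x 10us) for the mailbox lock. */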
3737 		for (cnt = 0; cnt < 300000; cnt++) {
3738 			if (TRY_MBX_REGISTER_LOCK(ha) != 0) {
3739 				ha->mcp = NULL;
3740 				MBX_REGISTER_UNLOCK(ha);
3741 				break;
3742 			} else {
3743 				drv_usecwait(10);
3744 			}
3745 		}
3746 	}
3747 }
3748 
3750 /*
3751  * ql_abort_isp
3752  *	Reset ISP and abort all outstanding commands.
3753  *
3754  * Input:
3755  *	ha = adapter state pointer.
3756  *	DEVICE_QUEUE_LOCK must be released.
3757  *
3758  * Returns:
3759  *	ql local function return status code.
3760  *
3761  * Context:
3762  *	Kernel context.
3763  */
3764 int
3765 ql_abort_isp(ql_adapter_state_t *vha)
3766 {
3767 	ql_link_t		*link, *link2;
3768 	ddi_devstate_t		state;
3769 	uint16_t		index;
3770 	ql_tgt_t		*tq;
3771 	ql_lun_t		*lq;
3772 	ql_srb_t		*sp;
3773 	int			rval = QL_SUCCESS;
3774 	ql_adapter_state_t	*ha = vha->pha;
3775 
3776 	QL_PRINT_2(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3777 
3778 	TASK_DAEMON_LOCK(ha);
3779 	ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
3780 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
3781 	    (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
3782 		TASK_DAEMON_UNLOCK(ha);
3783 		return (rval);
3784 	}
3785 
3786 	ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
3787 	ha->task_daemon_flags &= ~(RESET_MARKER_NEEDED | FIRMWARE_UP |
3788 	    FIRMWARE_LOADED);
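	/* Mark the loop down on the physical port and all virtual ports. */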
3789 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
3790 		vha->task_daemon_flags |= LOOP_DOWN;
3791 		vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
3792 		    LOOP_RESYNC_NEEDED);
3793 	}
3794 
3795 	TASK_DAEMON_UNLOCK(ha);
3796 
3797 	if (ha->mailbox_flags & MBX_BUSY_FLG) {
3798 		/* Acquire mailbox register lock. */
3799 		MBX_REGISTER_LOCK(ha);
3800 
3801 		/* Wake up the mailbox routine. */
3802 		ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
3803 		cv_broadcast(&ha->cv_mbx_intr);
3804 
3805 		/* Release mailbox register lock. */
3806 		MBX_REGISTER_UNLOCK(ha);
3807 
3808 		/* Wait up to 5 seconds for mailbox routine to clear abort. */
3809 		for (index = 100; index &&
3810 		    ha->mailbox_flags & MBX_ABORT; index--) {
3811 			drv_usecwait(50000);
3812 		}
3813 	}
3814 
3815 	/* Wait for commands to end gracefully if not in panic. */
3816 	if (ha->flags & PARITY_ERROR) {
3817 		ADAPTER_STATE_LOCK(ha);
3818 		ha->flags &= ~PARITY_ERROR;
3819 		ADAPTER_STATE_UNLOCK(ha);
3820 	} else if (ddi_in_panic() == 0) {
3821 		ql_cmd_wait(ha);
3822 	}
3823 
3824 	/* Shutdown IP. */
3825 	if (ha->flags & IP_INITIALIZED) {
3826 		(void) ql_shutdown_ip(ha);
3827 	}
3828 
3829 	/* Reset the chip. */
3830 	ql_reset_chip(ha);
3831 
3832 	/*
3833 	 * Even though we have waited for outstanding commands to complete,
3834 	 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
3835 	 * there could still be an interrupt thread active.  The interrupt
3836 	 * lock will prevent us from getting an sp from the outstanding
3837 	 * cmds array that the ISR may be using.
3838 	 */
3839 
3840 	/* Place all commands in outstanding cmd list on device queue. */
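	/* Drain the pending queue first, then sweep the outstanding array. */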
3841 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
3842 		REQUEST_RING_LOCK(ha);
3843 		INTR_LOCK(ha);
3844 		if ((link = ha->pending_cmds.first) != NULL) {
3845 			sp = link->base_address;
3846 			ql_remove_link(&ha->pending_cmds, &sp->cmd);
3847 
3848 			REQUEST_RING_UNLOCK(ha);
3849 			index = 0;
3850 		} else {
3851 			REQUEST_RING_UNLOCK(ha);
3852 			if ((sp = ha->outstanding_cmds[index]) == NULL) {
3853 				INTR_UNLOCK(ha);
3854 				continue;
3855 			}
3856 		}
3857 
3858 		/*
3859 		 * It's not obvious, but the index for commands pulled from
3860 		 * the pending list will be zero, and entry zero of the
3861 		 * outstanding array is not used, so nulling it is harmless.
3862 		 */
3863 
3864 		ha->outstanding_cmds[index] = NULL;
3865 		sp->handle = 0;
3866 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
3867 
3868 		INTR_UNLOCK(ha);
3869 
3870 		/* If the command timed out. */
3871 		if (sp->flags & SRB_COMMAND_TIMEOUT) {
3872 			sp->pkt->pkt_reason = CS_TIMEOUT;
3873 			sp->flags &= ~SRB_RETRY;
3874 			sp->flags |= SRB_ISP_COMPLETED;
3875 
3876 			/* Call done routine to handle completion. */
3877 			ql_done(&sp->cmd);
3878 			continue;
3879 		}
3880 
3881 		/* Acquire target queue lock. */
3882 		lq = sp->lun_queue;
3883 		tq = lq->target_queue;
3884 		DEVICE_QUEUE_LOCK(tq);
3885 
3886 		/* Reset watchdog time. */
3887 		sp->wdg_q_time = sp->init_wdg_q_time;
3888 
3889 		/* Place request back on top of device queue. */
3890 		sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
3891 		    SRB_RETRY);
3892 
3893 		ql_add_link_t(&lq->cmd, &sp->cmd);
3894 		sp->flags |= SRB_IN_DEVICE_QUEUE;
3895 
3896 		/* Release target queue lock. */
3897 		DEVICE_QUEUE_UNLOCK(tq);
3898 	}
3899 
3900 	/*
3901 	 * Clear the per-LUN active counts; there should not be
3902 	 * any I/O outstanding at this time.
3903 	 */
3904 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
3905 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3906 			link = vha->dev[index].first;
3907 			while (link != NULL) {
3908 				tq = link->base_address;
3909 				link = link->next;
3910 				DEVICE_QUEUE_LOCK(tq);
3911 				tq->outcnt = 0;
3912 				tq->flags &= ~TQF_QUEUE_SUSPENDED;
3913 				for (link2 = tq->lun_queues.first;
3914 				    link2 != NULL; link2 = link2->next) {
3915 					lq = link2->base_address;
3916 					lq->lun_outcnt = 0;
3917 					lq->flags &= ~LQF_UNTAGGED_PENDING;
3918 				}
3919 				DEVICE_QUEUE_UNLOCK(tq);
3920 			}
3921 		}
3922 	}
3923 
3924 	rval = ql_chip_diag(ha);
3925 	if (rval == QL_SUCCESS) {
3926 		(void) ql_load_isp_firmware(ha);
3927 	}
3928 
3929 	if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
3930 	    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
3931 	    (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {
3932 
3933 		/* Clear abort flags that may have been set in the meantime. */
3934 		TASK_DAEMON_LOCK(ha);
3935 		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
3936 		    ABORT_ISP_ACTIVE);
3937 		TASK_DAEMON_UNLOCK(ha);
3938 
3939 		/* Enable ISP interrupts. */
3940 		CFG_IST(ha, CFG_CTRL_242581) ?
3941 		    WRT32_IO_REG(ha, ictrl, ISP_EN_RISC) :
3942 		    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
3943 
3944 		ADAPTER_STATE_LOCK(ha);
3945 		ha->flags |= INTERRUPTS_ENABLED;
3946 		ADAPTER_STATE_UNLOCK(ha);
3947 
3948 		/* Set loop online, if it really is. */
3949 		ql_loop_online(ha);
3950 
3951 		state = ddi_get_devstate(ha->dip);
3952 		if (state != DDI_DEVSTATE_UP) {
3953 			/*EMPTY*/
3954 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_RESTORED,
3955 			    DDI_DEVICE_FAULT, "Device reset succeeded");
3956 		}
3957 	} else {
3958 		/* Enable ISP interrupts. */
3959 		CFG_IST(ha, CFG_CTRL_242581) ?
3960 		    WRT32_IO_REG(ha, ictrl, ISP_EN_RISC) :
3961 		    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
3962 
3963 		ADAPTER_STATE_LOCK(ha);
3964 		ha->flags |= INTERRUPTS_ENABLED;
3965 		ADAPTER_STATE_UNLOCK(ha);
3966 
3967 		TASK_DAEMON_LOCK(ha);
3968 		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE);
3969 		ha->task_daemon_flags |= LOOP_DOWN;
3970 		TASK_DAEMON_UNLOCK(ha);
3971 
3972 		ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
3973 	}
3974 
3975 	if (rval != QL_SUCCESS) {
3976 		EL(ha, "failed, rval = %xh\n", rval);
3977 	} else {
3978 		/*EMPTY*/
3979 		QL_PRINT_2(CE_CONT, "(%d): done\n", ha->instance);
3980 	}
3981 	return (rval);
3982 }
3983 
3984 /*
3985  * ql_vport_control
3986  *	Issue Virtual Port Control command.
3987  *
3988  * Input:
3989  *	ha = virtual adapter state pointer.
3990  *	cmd = control command.
3991  *
3992  * Returns:
3993  *	ql local function return status code.
3994  *
3995  * Context:
3996  *	Kernel context.
3997  */
3998 int
3999 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd)
4000 {
4001 	ql_mbx_iocb_t	*pkt;
4002 	uint8_t		bit;
4003 	int		rval;
4004 	uint32_t	pkt_size;
4005 
4006 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4007 
4008 	if (ha->vp_index != 0) {
4009 		pkt_size = sizeof (ql_mbx_iocb_t);
4010 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4011 		if (pkt == NULL) {
4012 			EL(ha, "failed, kmem_zalloc\n");
4013 			return (QL_MEMORY_ALLOC_FAILED);
4014 		}
4015 
4016 		pkt->vpc.entry_type = VP_CONTROL_TYPE;
4017 		pkt->vpc.entry_count = 1;
4018 		pkt->vpc.command = cmd;
4019 		pkt->vpc.vp_count = 1;
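		/* Set this VP's bit in the VP index bitmap (bit 0 = VP 1). */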
4020 		bit = (uint8_t)(ha->vp_index - 1);
4021 		pkt->vpc.vp_index[bit / 8] = (uint8_t)
4022 		    (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8);
4023 
4024 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4025 		if (rval == QL_SUCCESS && pkt->vpc.status != 0) {
4026 			rval = QL_COMMAND_ERROR;
4027 		}
4028 
4029 		kmem_free(pkt, pkt_size);
4030 	} else {
4031 		rval = QL_SUCCESS;
4032 	}
4033 
4034 	if (rval != QL_SUCCESS) {
4035 		EL(ha, "failed, rval = %xh\n", rval);
4036 	} else {
4037 		/*EMPTY*/
4038 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4039 		    ha->vp_index);
4040 	}
4041 	return (rval);
4042 }
4043 
4044 /*
4045  * ql_vport_modify
4046  *	Issue Modify Virtual Port command.
4047  *
4048  * Input:
4049  *	ha = virtual adapter state pointer.
4050  *	cmd = command.
4051  *	opt = option.
4052  *
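 * Returns:
 *	ql local function return status code.
 *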
4053  * Context:
4054  *	Interrupt or Kernel context, no mailbox commands allowed.
4055  */
4056 int
4057 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt)
4058 {
4059 	ql_mbx_iocb_t	*pkt;
4060 	int		rval;
4061 	uint32_t	pkt_size;
4062 
4063 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4064 
4065 	pkt_size = sizeof (ql_mbx_iocb_t);
4066 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4067 	if (pkt == NULL) {
4068 		EL(ha, "failed, kmem_zalloc\n");
4069 		return (QL_MEMORY_ALLOC_FAILED);
4070 	}
4071 
4072 	pkt->vpm.entry_type = VP_MODIFY_TYPE;
4073 	pkt->vpm.entry_count = 1;
4074 	pkt->vpm.command = cmd;
4075 	pkt->vpm.vp_count = 1;
4076 	pkt->vpm.first_vp_index = ha->vp_index;
4077 	pkt->vpm.first_options = opt;
4078 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name,
4079 	    8);
4080 	bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name,
4081 	    8);
4082 
4083 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4084 	if (rval == QL_SUCCESS && pkt->vpm.status != 0) {
4085 		EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval,
4086 		    pkt->vpm.status);
4087 		rval = QL_COMMAND_ERROR;
4088 	}
4089 
4090 	kmem_free(pkt, pkt_size);
4091 
4092 	if (rval != QL_SUCCESS) {
4093 		EL(ha, "failed, rval = %xh\n", rval);
4094 	} else {
4095 		/*EMPTY*/
4096 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4097 		    ha->vp_index);
4098 	}
4099 	return (rval);
4100 }
4101 
4102 /*
4103  * ql_vport_enable
4104  *	Enable virtual port.
4105  *
4106  * Input:
4107  *	ha = virtual adapter state pointer.
4108  *
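 * Returns:
 *	ql local function return status code.
 *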
4109  * Context:
4110  *	Kernel context.
4111  */
4112 int
4113 ql_vport_enable(ql_adapter_state_t *ha)
4114 {
4115 	int	timer;
4116 
4117 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4118 
4119 	ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4120 	TASK_DAEMON_LOCK(ha);
4121 	ha->task_daemon_flags |= LOOP_DOWN;
4122 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
4123 	TASK_DAEMON_UNLOCK(ha);
4124 
4125 	ADAPTER_STATE_LOCK(ha);
4126 	ha->flags |= VP_ENABLED;
4127 	ADAPTER_STATE_UNLOCK(ha);
4128 
4129 	if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
4130 	    VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
4131 		QL_PRINT_2(CE_CONT, "(%d): failed to enable virtual port=%d\n",
4132 		    ha->instance, ha->vp_index);
4133 		return (QL_FUNCTION_FAILED);
4134 	}
4135 	if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
4136 		/* Wait for loop to come up. */
4137 		for (timer = 0; timer < 3000 &&
4138 		    !(ha->task_daemon_flags & STATE_ONLINE);
4139 		    timer++) {
4140 			delay(1);
4141 		}
4142 	}
4143 
4144 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4145 
4146 	return (QL_SUCCESS);
4147 }
4148 
4149 /*
4150  * ql_vport_create
4151  *	Create virtual port context.
4152  *
4153  * Input:
4154  *	ha = parent adapter state pointer.
4155  *	index = virtual port index number.
4156  *
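 * Returns:
 *	Pointer to the new virtual adapter state.
 *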
4157  * Context:
4158  *	Kernel context.
4159  */
4160 ql_adapter_state_t *
4161 ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
4162 {
4163 	ql_adapter_state_t	*vha;
4164 
4165 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4166 
4167 	/* Inherit the parent's data. */
4168 	vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);
4169 
4170 	ADAPTER_STATE_LOCK(ha);
4171 	bcopy(ha, vha, sizeof (ql_adapter_state_t));
4172 	vha->pi_attrs = NULL;
4173 	vha->ub_outcnt = 0;
4174 	vha->ub_allocated = 0;
4175 	vha->flags = 0;
4176 	vha->task_daemon_flags = 0;
4177 	ha->vp_next = vha;
4178 	vha->pha = ha;
4179 	vha->vp_index = index;
4180 	ADAPTER_STATE_UNLOCK(ha);
4181 
4182 	vha->hba.next = NULL;
4183 	vha->hba.prev = NULL;
4184 	vha->hba.base_address = vha;
4185 	vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4186 	vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
4187 	    KM_SLEEP);
4188 	vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
4189 	    KM_SLEEP);
4190 
4191 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4192 
4193 	return (vha);
4194 }
4195 
4196 /*
4197  * ql_vport_destroy
4198  *	Destroy virtual port context.
4199  *
4200  * Input:
4201  *	ha = virtual adapter state pointer.
4202  *
4203  * Context:
4204  *	Kernel context.
4205  */
4206 void
4207 ql_vport_destroy(ql_adapter_state_t *ha)
4208 {
4209 	ql_adapter_state_t	*vha;
4210 
4211 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4212 
4213 	/* Remove port from list. */
4214 	ADAPTER_STATE_LOCK(ha);
4215 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
4216 		if (vha->vp_next == ha) {
4217 			vha->vp_next = ha->vp_next;
4218 			break;
4219 		}
4220 	}
4221 	ADAPTER_STATE_UNLOCK(ha);
4222 
4223 	if (ha->ub_array != NULL) {
4224 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
4225 	}
4226 	if (ha->dev != NULL) {
4227 		kmem_free(ha->dev, sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE);
4228 	}
4229 	kmem_free(ha, sizeof (ql_adapter_state_t));
4230 
4231 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4232 }
4233 
4234 /*
4235  * ql_mps_reset
4236  *	Reset MPS for FCoE functions.
4237  *
4238  * Input:
4239  *	ha = virtual adapter state pointer.
4240  *
4241  * Context:
4242  *	Kernel context.
4243  */
4244 static void
4245 ql_mps_reset(ql_adapter_state_t *ha)
4246 {
4247 	uint32_t	data, dctl = 1000;
4248 
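	/*
	 * Write 1 to RISC RAM word 0x7c00 and poll (up to 1000 attempts)
	 * until bit 0 reads back set.
	 */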
4249 	do {
4250 		if (dctl-- == 0 || ql_wrt_risc_ram_word(ha, 0x7c00, 1) !=
4251 		    QL_SUCCESS) {
4252 			return;
4253 		}
4254 		if (ql_rd_risc_ram_word(ha, 0x7c00, &data) != QL_SUCCESS) {
4255 			ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4256 			return;
4257 		}
4258 	} while (!(data & BIT_0));
4259 
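	/*
	 * Sync bits 7:5 of RISC RAM word 0x7A15 with bits 7:5 of PCI
	 * config offset 0x54 (presumably the PCIe device control register).
	 */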
4260 	if (ql_rd_risc_ram_word(ha, 0x7A15, &data) == QL_SUCCESS) {
4261 		dctl = (uint16_t)ql_pci_config_get16(ha, 0x54);
4262 		if ((data & 0xe0) != (dctl & 0xe0)) {
4263 			data &= 0xff1f;
4264 			data |= dctl & 0xe0;
4265 			ql_wrt_risc_ram_word(ha, 0x7A15, data);
4266 		}
4267 	}
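	/* Write 0 back to 0x7c00 to end the handshake. */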
4268 	ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4269 }
4270