1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2008 QLogic Corporation */
23 
24 /*
25  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2008 QLogic Corporation; ql_iocb.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2008 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_xioctl.h>
50 
51 /*
52  * Local Function Prototypes.
53  */
54 static void ql_continuation_iocb(ql_adapter_state_t *, ddi_dma_cookie_t *,
55     uint16_t, boolean_t);
56 static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);
57 
58 /*
59  * ql_start_iocb
 *	The start IOCB is responsible for building request packets
 *	on the request ring and modifying the ISP input pointer.
62  *
63  * Input:
64  *	ha:	adapter state pointer.
65  *	sp:	srb structure pointer.
66  *
67  * Context:
68  *	Interrupt or Kernel context, no mailbox commands allowed.
69  */
70 void
71 ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
72 {
73 	ql_link_t		*link;
74 	request_t		*pkt;
75 	uint64_t		*ptr64;
76 	uint32_t		cnt;
77 	ql_adapter_state_t	*ha = vha->pha;
78 
79 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
80 
81 	/* Acquire ring lock. */
82 	REQUEST_RING_LOCK(ha);
83 
84 	if (sp != NULL) {
85 		/*
		 * If commands are already pending, queue this one at the
		 * back and start the oldest pending command instead, so
		 * that ordering is preserved.
88 		 */
89 		if ((link = ha->pending_cmds.first) != NULL) {
90 			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
91 			/* Remove command from pending command queue */
92 			sp = link->base_address;
93 			ql_remove_link(&ha->pending_cmds, &sp->cmd);
94 		}
95 	} else {
96 		/* Get command from pending command queue if not empty. */
97 		if ((link = ha->pending_cmds.first) == NULL) {
98 			/* Release ring specific lock */
99 			REQUEST_RING_UNLOCK(ha);
100 			QL_PRINT_3(CE_CONT, "(%d): empty done\n",
101 			    ha->instance);
102 			return;
103 		}
104 		/* Remove command from pending command queue */
105 		sp = link->base_address;
106 		ql_remove_link(&ha->pending_cmds, &sp->cmd);
107 	}
108 
	/* Start as many commands as possible. */
110 	for (;;) {
111 		if (ha->req_q_cnt < sp->req_cnt) {
112 			/* Calculate number of free request entries. */
113 			cnt = RD16_IO_REG(ha, req_out);
114 			if (ha->req_ring_index < cnt)  {
115 				ha->req_q_cnt = (uint16_t)
116 				    (cnt - ha->req_ring_index);
117 			} else {
118 				ha->req_q_cnt = (uint16_t)(REQUEST_ENTRY_CNT -
119 				    (ha->req_ring_index - cnt));
120 			}
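			/*
			 * Keep one entry in reserve so that a
			 * completely full ring cannot be mistaken
			 * for an empty one (req_in == req_out).
			 */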
121 			if (ha->req_q_cnt != 0) {
122 				ha->req_q_cnt--;
123 			}
124 
125 			/* If no room for request in request ring. */
126 			if (ha->req_q_cnt < sp->req_cnt) {
127 				QL_PRINT_8(CE_CONT, "(%d): request ring full,"
128 				    " req_q_cnt=%d, req_ring_index=%d\n",
129 				    ha->instance, ha->req_q_cnt,
130 				    ha->req_ring_index);
131 				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
132 				break;
133 			}
134 		}
135 
136 		/* Check for room in outstanding command list. */
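		/*
		 * Slot 0 is skipped (the search starts at 1 and wraps
		 * back to 1), presumably so that a handle of zero stays
		 * invalid.
		 */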
137 		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
138 			ha->osc_index++;
139 			if (ha->osc_index == MAX_OUTSTANDING_COMMANDS) {
140 				ha->osc_index = 1;
141 			}
142 			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
143 				break;
144 			}
145 		}
146 
147 		if (cnt == MAX_OUTSTANDING_COMMANDS) {
148 			QL_PRINT_8(CE_CONT, "(%d): no room in outstanding "
149 			    "array\n", ha->instance);
150 			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
151 			break;
152 		}
153 
		/* There is room; build the request entry. */
155 		ha->outstanding_cmds[ha->osc_index] = sp;
156 		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
157 		    ha->osc_index;
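		/*
		 * The low bits of the handle locate the command in
		 * outstanding_cmds[] when the completion arrives; the
		 * command count in the upper bits helps to detect a
		 * stale or reused handle.
		 */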
158 		ha->req_q_cnt -= sp->req_cnt;
159 		pkt = ha->request_ring_ptr;
160 		sp->flags |= SRB_IN_TOKEN_ARRAY;
161 
162 		/* Zero out packet. */
163 		ptr64 = (uint64_t *)pkt;
164 		*ptr64++ = 0; *ptr64++ = 0;
165 		*ptr64++ = 0; *ptr64++ = 0;
166 		*ptr64++ = 0; *ptr64++ = 0;
167 		*ptr64++ = 0; *ptr64 = 0;
168 
169 		/* Setup IOCB common data. */
170 		pkt->entry_count = (uint8_t)sp->req_cnt;
171 		pkt->sys_define = (uint8_t)ha->req_ring_index;
172 		ddi_put32(ha->hba_buf.acc_handle, &pkt->handle,
173 		    (uint32_t)sp->handle);
174 
175 		/* Setup remaining IOCB data. */
176 		(sp->iocb)(vha, sp, pkt);
177 
178 		sp->flags |= SRB_ISP_STARTED;
179 
180 		QL_PRINT_5(CE_CONT, "(%d,%d): req packet, sp=%p\n",
181 		    ha->instance, vha->vp_index, (void *)sp);
182 		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
183 
184 		/* Sync DMA buffer. */
185 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
186 		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
187 		    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
188 		    DDI_DMA_SYNC_FORDEV);
189 
190 		/* Adjust ring index. */
191 		ha->req_ring_index++;
192 		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
193 			ha->req_ring_index = 0;
194 			ha->request_ring_ptr = ha->request_ring_bp;
195 		} else {
196 			ha->request_ring_ptr++;
197 		}
198 
199 		/* Reset watchdog timer */
200 		sp->wdg_q_time = sp->init_wdg_q_time;
201 
202 		/* Set chip new ring index. */
203 		WRT16_IO_REG(ha, req_in, ha->req_ring_index);
204 
205 		/* Update outstanding command count statistic. */
206 		ha->adapter_stats->ncmds++;
207 
208 		if ((link = ha->pending_cmds.first) == NULL) {
209 			break;
210 		}
211 
212 		/* Remove command from pending command queue */
213 		sp = link->base_address;
214 		ql_remove_link(&ha->pending_cmds, &sp->cmd);
215 	}
216 
217 	/* Release ring specific lock */
218 	REQUEST_RING_UNLOCK(ha);
219 
220 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
221 }
222 
223 /*
224  * ql_req_pkt
225  *	Function is responsible for locking ring and
226  *	getting a zeroed out request packet.
227  *
228  * Input:
229  *	ha:	adapter state pointer.
230  *	pkt:	address for packet pointer.
231  *
232  * Returns:
233  *	ql local function return status code.
234  *
235  * Context:
236  *	Interrupt or Kernel context, no mailbox commands allowed.
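 *
 * Note:
 *	On success the request ring lock remains held; the caller fills
 *	in the returned packet and then calls ql_isp_cmd(), which
 *	updates the chip ring index and releases the lock (see
 *	ql_marker() for the typical usage).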
237  */
238 int
239 ql_req_pkt(ql_adapter_state_t *vha, request_t **pktp)
240 {
241 	uint16_t		cnt;
242 	uint32_t		*long_ptr;
243 	uint32_t		timer;
244 	int			rval = QL_FUNCTION_TIMEOUT;
245 	ql_adapter_state_t	*ha = vha->pha;
246 
247 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
248 
249 	/* Wait for 30 seconds for slot. */
250 	for (timer = 30000; timer != 0; timer--) {
251 		/* Acquire ring lock. */
252 		REQUEST_RING_LOCK(ha);
253 
254 		if (ha->req_q_cnt == 0) {
255 			/* Calculate number of free request entries. */
256 			cnt = RD16_IO_REG(ha, req_out);
257 			if (ha->req_ring_index < cnt) {
258 				ha->req_q_cnt = (uint16_t)
259 				    (cnt - ha->req_ring_index);
260 			} else {
261 				ha->req_q_cnt = (uint16_t)
262 				    (REQUEST_ENTRY_CNT -
263 				    (ha->req_ring_index - cnt));
264 			}
265 			if (ha->req_q_cnt != 0) {
266 				ha->req_q_cnt--;
267 			}
268 		}
269 
270 		/* Found empty request ring slot? */
271 		if (ha->req_q_cnt != 0) {
272 			ha->req_q_cnt--;
273 			*pktp = ha->request_ring_ptr;
274 
275 			/* Zero out packet. */
276 			long_ptr = (uint32_t *)ha->request_ring_ptr;
277 			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE/4; cnt++) {
278 				*long_ptr++ = 0;
279 			}
280 
281 			/* Setup IOCB common data. */
282 			ha->request_ring_ptr->entry_count = 1;
283 			ha->request_ring_ptr->sys_define =
284 			    (uint8_t)ha->req_ring_index;
285 			ddi_put32(ha->hba_buf.acc_handle,
286 			    &ha->request_ring_ptr->handle,
287 			    (uint32_t)QL_FCA_BRAND);
288 
289 			rval = QL_SUCCESS;
290 
291 			break;
292 		}
293 
294 		/* Release request queue lock. */
295 		REQUEST_RING_UNLOCK(ha);
296 
297 		drv_usecwait(MILLISEC);
298 
299 		/* Check for pending interrupts. */
300 		/*
301 		 * XXX protect interrupt routine from calling itself.
		 * Need to revisit this routine. So far this case has
		 * never been hit because a request slot was always
		 * available.
304 		 */
305 		if ((!(curthread->t_flag & T_INTR_THREAD)) &&
306 		    (RD16_IO_REG(ha, istatus) & RISC_INT)) {
307 			(void) ql_isr((caddr_t)ha);
308 			INTR_LOCK(ha);
309 			ha->intr_claimed = TRUE;
310 			INTR_UNLOCK(ha);
311 		}
312 	}
313 
314 	if (rval != QL_SUCCESS) {
315 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
316 		EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
317 	} else {
318 		/*EMPTY*/
319 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
320 	}
321 	return (rval);
322 }
323 
324 /*
325  * ql_isp_cmd
326  *	Function is responsible for modifying ISP input pointer.
327  *	Releases ring lock.
328  *
329  * Input:
330  *	ha:	adapter state pointer.
331  *
332  * Context:
333  *	Interrupt or Kernel context, no mailbox commands allowed.
334  */
335 void
336 ql_isp_cmd(ql_adapter_state_t *vha)
337 {
338 	ql_adapter_state_t	*ha = vha->pha;
339 
340 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
341 
342 	QL_PRINT_5(CE_CONT, "(%d): req packet:\n", ha->instance);
343 	QL_DUMP_5((uint8_t *)ha->request_ring_ptr, 8, REQUEST_ENTRY_SIZE);
344 
345 	/* Sync DMA buffer. */
346 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
347 	    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
348 	    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
349 	    DDI_DMA_SYNC_FORDEV);
350 
351 	/* Adjust ring index. */
352 	ha->req_ring_index++;
353 	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
354 		ha->req_ring_index = 0;
355 		ha->request_ring_ptr = ha->request_ring_bp;
356 	} else {
357 		ha->request_ring_ptr++;
358 	}
359 
360 	/* Set chip new ring index. */
361 	WRT16_IO_REG(ha, req_in, ha->req_ring_index);
362 
363 	/* Release ring lock. */
364 	REQUEST_RING_UNLOCK(ha);
365 
366 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
367 }
368 
369 /*
370  * ql_command_iocb
371  *	Setup of command IOCB.
372  *
373  * Input:
374  *	ha:	adapter state pointer.
375  *	sp:	srb structure pointer.
377  *	arg:	request queue packet.
378  *
379  * Context:
380  *	Interrupt or Kernel context, no mailbox commands allowed.
381  */
382 void
383 ql_command_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
384 {
385 	ddi_dma_cookie_t	*cp;
386 	uint32_t		*ptr32, cnt;
387 	uint16_t		seg_cnt;
388 	fcp_cmd_t		*fcp = sp->fcp;
389 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
390 	cmd_entry_t		*pkt = arg;
391 
392 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
393 
394 	/* Set LUN number */
395 	pkt->lun_l = LSB(sp->lun_queue->lun_no);
396 	pkt->lun_h = MSB(sp->lun_queue->lun_no);
397 
398 	/* Set target ID */
399 	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
400 		pkt->target_l = LSB(tq->loop_id);
401 		pkt->target_h = MSB(tq->loop_id);
402 	} else {
403 		pkt->target_h = LSB(tq->loop_id);
404 	}
405 
406 	/* Set tag queue control flags */
407 	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
408 		pkt->control_flags_l = (uint8_t)
409 		    (pkt->control_flags_l | CF_HTAG);
410 	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
411 		pkt->control_flags_l = (uint8_t)
412 		    (pkt->control_flags_l | CF_OTAG);
413 	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
414 	} else {
415 		pkt->control_flags_l = (uint8_t)
416 		    (pkt->control_flags_l | CF_STAG);
417 	}
418 
419 	/* Set ISP command timeout. */
420 	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);
421 
422 	/* Load SCSI CDB */
423 	ddi_rep_put8(ha->hba_buf.acc_handle, fcp->fcp_cdb,
424 	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);
425 
426 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
427 		pkt->entry_type = IOCB_CMD_TYPE_3;
428 		cnt = CMD_TYPE_3_DATA_SEGMENTS;
429 	} else {
430 		pkt->entry_type = IOCB_CMD_TYPE_2;
431 		cnt = CMD_TYPE_2_DATA_SEGMENTS;
432 	}
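	/*
	 * A type 3 IOCB carries 64-bit segment addresses and so holds
	 * fewer data segments in the base entry than a type 2 IOCB;
	 * segments that do not fit are placed in continuation entries.
	 */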
433 
434 	if (fcp->fcp_data_len == 0) {
435 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
436 		ha->xioctl->IOControlRequests++;
437 		return;
438 	}
439 
440 	/*
441 	 * Set transfer direction. Load Data segments.
442 	 */
443 	if (fcp->fcp_cntl.cntl_write_data) {
444 		pkt->control_flags_l = (uint8_t)
445 		    (pkt->control_flags_l | CF_DATA_OUT);
446 		ha->xioctl->IOOutputRequests++;
447 		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
448 	} else if (fcp->fcp_cntl.cntl_read_data) {
449 		pkt->control_flags_l = (uint8_t)
450 		    (pkt->control_flags_l | CF_DATA_IN);
451 		ha->xioctl->IOInputRequests++;
452 		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
453 	}
454 
455 	/* Set data segment count. */
456 	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
457 	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
458 
459 	/* Load total byte count. */
460 	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count, fcp->fcp_data_len);
461 
462 	/* Load command data segment. */
463 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
464 	cp = sp->pkt->pkt_data_cookie;
465 	while (cnt && seg_cnt) {
466 		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
467 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
468 			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
469 			    cp->dmac_notused);
470 		}
471 		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
472 		    (uint32_t)cp->dmac_size);
473 		seg_cnt--;
474 		cnt--;
475 		cp++;
476 	}
477 
478 	/*
479 	 * Build continuation packets.
480 	 */
481 	if (seg_cnt) {
482 		ql_continuation_iocb(ha, cp, seg_cnt,
483 		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
484 	}
485 
486 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
487 }
488 
489 /*
490  * ql_continuation_iocb
491  *	Setup of continuation IOCB.
492  *
493  * Input:
494  *	ha:		adapter state pointer.
495  *	cp:		cookie list pointer.
496  *	seg_cnt:	number of segments.
497  *	addr64:		64 bit addresses.
498  *
499  * Context:
500  *	Interrupt or Kernel context, no mailbox commands allowed.
501  */
502 static void
503 ql_continuation_iocb(ql_adapter_state_t *ha, ddi_dma_cookie_t *cp,
504     uint16_t seg_cnt, boolean_t addr64)
505 {
506 	cont_entry_t	*pkt;
507 	uint64_t	*ptr64;
508 	uint32_t	*ptr32, cnt;
509 
510 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
511 
512 	/*
513 	 * Build continuation packets.
514 	 */
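	/*
	 * Each continuation entry consumes the next request ring slot
	 * without checking for free space; callers are expected to
	 * have reserved enough entries up front (sp->req_cnt in
	 * ql_start_iocb()).
	 */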
515 	while (seg_cnt) {
516 		/* Sync DMA buffer. */
517 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
518 		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
519 		    REQUEST_Q_BUFFER_OFFSET), REQUEST_ENTRY_SIZE,
520 		    DDI_DMA_SYNC_FORDEV);
521 
522 		/* Adjust ring pointer, and deal with wrap. */
523 		ha->req_ring_index++;
524 		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
525 			ha->req_ring_index = 0;
526 			ha->request_ring_ptr = ha->request_ring_bp;
527 		} else {
528 			ha->request_ring_ptr++;
529 		}
530 		pkt = (cont_entry_t *)ha->request_ring_ptr;
531 
532 		/* Zero out packet. */
533 		ptr64 = (uint64_t *)pkt;
534 		*ptr64++ = 0; *ptr64++ = 0;
535 		*ptr64++ = 0; *ptr64++ = 0;
536 		*ptr64++ = 0; *ptr64++ = 0;
537 		*ptr64++ = 0; *ptr64 = 0;
538 
539 		/*
540 		 * Build continuation packet.
541 		 */
542 		pkt->entry_count = 1;
543 		pkt->sys_define = (uint8_t)ha->req_ring_index;
544 		if (addr64) {
545 			pkt->entry_type = CONTINUATION_TYPE_1;
546 			cnt = CONT_TYPE_1_DATA_SEGMENTS;
547 			ptr32 = (uint32_t *)
548 			    &((cont_type_1_entry_t *)pkt)->dseg_0_address;
549 			while (cnt && seg_cnt) {
550 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
551 				    cp->dmac_address);
552 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
553 				    cp->dmac_notused);
554 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
555 				    (uint32_t)cp->dmac_size);
556 				seg_cnt--;
557 				cnt--;
558 				cp++;
559 			}
560 		} else {
561 			pkt->entry_type = CONTINUATION_TYPE_0;
562 			cnt = CONT_TYPE_0_DATA_SEGMENTS;
563 			ptr32 = (uint32_t *)&pkt->dseg_0_address;
564 			while (cnt && seg_cnt) {
565 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
566 				    cp->dmac_address);
567 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
568 				    (uint32_t)cp->dmac_size);
569 				seg_cnt--;
570 				cnt--;
571 				cp++;
572 			}
573 		}
574 
575 		QL_PRINT_5(CE_CONT, "(%d): packet:\n", ha->instance);
576 		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
577 	}
578 
579 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
580 }
581 
582 /*
583  * ql_command_24xx_iocb
584  *	Setup of ISP24xx command IOCB.
585  *
586  * Input:
587  *	ha:	adapter state pointer.
588  *	sp:	srb structure pointer.
589  *	arg:	request queue packet.
590  *
591  * Context:
592  *	Interrupt or Kernel context, no mailbox commands allowed.
593  */
594 void
595 ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
596 {
597 	ddi_dma_cookie_t	*cp;
598 	uint32_t		*ptr32, cnt;
599 	uint16_t		seg_cnt;
600 	fcp_cmd_t		*fcp = sp->fcp;
601 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
602 	cmd_24xx_entry_t	*pkt = arg;
603 	ql_adapter_state_t	*pha = ha->pha;
604 
605 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
606 
607 	pkt->entry_type = IOCB_CMD_TYPE_7;
608 
609 	/* Set LUN number */
610 	pkt->fcp_lun[2] = LSB(sp->lun_queue->lun_no);
611 	pkt->fcp_lun[3] = MSB(sp->lun_queue->lun_no);
612 
613 	/* Set N_port handle */
614 	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);
615 
616 	/* Set target ID */
617 	pkt->target_id[0] = tq->d_id.b.al_pa;
618 	pkt->target_id[1] = tq->d_id.b.area;
619 	pkt->target_id[2] = tq->d_id.b.domain;
620 
621 	pkt->vp_index = ha->vp_index;
622 
623 	/* Set ISP command timeout. */
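	/*
	 * The field is written only for timeouts below 0x1999; leaving
	 * it zero presumably disables the firmware timeout for very
	 * large values.
	 */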
624 	if (sp->isp_timeout < 0x1999) {
625 		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
626 		    sp->isp_timeout);
627 	}
628 
629 	/* Load SCSI CDB */
630 	ddi_rep_put8(pha->hba_buf.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
631 	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
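	/*
	 * The firmware appears to interpret each 32-bit word of the
	 * CDB field as big-endian, so every 4-byte group is swapped
	 * after the copy.
	 */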
632 	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
633 		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
634 	}
635 
636 	/*
637 	 * Set tag queue control flags
638 	 * Note:
639 	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
640 	 *	problem with x86 in 32bit kernel mode
641 	 */
642 	switch (fcp->fcp_cntl.cntl_qtype) {
643 	case FCP_QTYPE_SIMPLE:
644 		pkt->task = TA_STAG;
645 		break;
646 	case FCP_QTYPE_HEAD_OF_Q:
647 		pkt->task = TA_HTAG;
648 		break;
649 	case FCP_QTYPE_ORDERED:
650 		pkt->task = TA_OTAG;
651 		break;
652 	case FCP_QTYPE_ACA_Q_TAG:
653 		pkt->task = TA_ACA;
654 		break;
655 	case FCP_QTYPE_UNTAGGED:
656 		pkt->task = TA_UNTAGGED;
657 		break;
658 	default:
659 		break;
660 	}
661 
662 	if (fcp->fcp_data_len == 0) {
663 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
664 		pha->xioctl->IOControlRequests++;
665 		return;
666 	}
667 
668 	/* Set transfer direction. */
669 	if (fcp->fcp_cntl.cntl_write_data) {
670 		pkt->control_flags = CF_WR;
671 		pha->xioctl->IOOutputRequests++;
672 		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
673 	} else if (fcp->fcp_cntl.cntl_read_data) {
674 		pkt->control_flags = CF_RD;
675 		pha->xioctl->IOInputRequests++;
676 		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
677 	}
678 
679 	/* Set data segment count. */
680 	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
681 	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
682 
683 	/* Load total byte count. */
684 	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
685 	    fcp->fcp_data_len);
686 
687 	/* Load command data segment. */
688 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
689 	cp = sp->pkt->pkt_data_cookie;
690 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
691 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
692 	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
693 	seg_cnt--;
694 	cp++;
695 
696 	/*
697 	 * Build continuation packets.
698 	 */
699 	if (seg_cnt) {
700 		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
701 	}
702 
703 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
704 }
705 
706 /*
707  * ql_marker
708  *	Function issues marker IOCB.
709  *
710  * Input:
711  *	ha:		adapter state pointer.
 *	loop_id:	device loop ID.
 *	lun:		device LUN.
 *	type:		marker modifier.
715  *
716  * Returns:
717  *	ql local function return status code.
718  *
719  * Context:
720  *	Interrupt or Kernel context, no mailbox commands allowed.
721  */
722 int
723 ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, uint16_t lun,
724     uint8_t type)
725 {
726 	mrk_entry_t	*pkt;
727 	int		rval;
728 
729 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
730 
731 	rval = ql_req_pkt(ha, (request_t **)&pkt);
732 	if (rval == QL_SUCCESS) {
733 		pkt->entry_type = MARKER_TYPE;
734 
735 		if (CFG_IST(ha, CFG_CTRL_2425)) {
736 			marker_24xx_entry_t	*pkt24 =
737 			    (marker_24xx_entry_t *)pkt;
738 
739 			pkt24->modifier = type;
740 
741 			/* Set LUN number */
742 			pkt24->fcp_lun[2] = LSB(lun);
743 			pkt24->fcp_lun[3] = MSB(lun);
744 
745 			pkt24->vp_index = ha->vp_index;
746 
747 			/* Set N_port handle */
748 			ddi_put16(ha->pha->hba_buf.acc_handle,
749 			    &pkt24->n_port_hdl, loop_id);
750 
751 		} else {
752 			pkt->modifier = type;
753 
754 			pkt->lun_l = LSB(lun);
755 			pkt->lun_h = MSB(lun);
756 
757 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
758 				pkt->target_l = LSB(loop_id);
759 				pkt->target_h = MSB(loop_id);
760 			} else {
761 				pkt->target_h = LSB(loop_id);
762 			}
763 		}
764 
765 		/* Issue command to ISP */
766 		ql_isp_cmd(ha);
767 	}
768 
769 	if (rval != QL_SUCCESS) {
770 		EL(ha, "failed, rval = %xh\n", rval);
771 	} else {
772 		/*EMPTY*/
773 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
774 	}
775 	return (rval);
776 }
777 
778 /*
779  * ql_ms_iocb
780  *	Setup of name/management server IOCB.
781  *
782  * Input:
783  *	ha = adapter state pointer.
784  *	sp = srb structure pointer.
785  *	arg = request queue packet.
786  *
787  * Context:
788  *	Interrupt or Kernel context, no mailbox commands allowed.
789  */
790 void
791 ql_ms_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
792 {
793 	ddi_dma_cookie_t	*cp;
794 	uint32_t		*ptr32;
795 	uint16_t		seg_cnt;
796 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
797 	ms_entry_t		*pkt = arg;
798 
799 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
800 #if 0
801 	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
802 #endif
803 	/*
804 	 * Build command packet.
805 	 */
806 	pkt->entry_type = MS_TYPE;
807 
808 	/* Set loop ID */
809 	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
810 		pkt->loop_id_l = LSB(tq->loop_id);
811 		pkt->loop_id_h = MSB(tq->loop_id);
812 	} else {
813 		pkt->loop_id_h = LSB(tq->loop_id);
814 	}
815 
816 	/* Set ISP command timeout. */
817 	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);
818 
819 	/* Set cmd data segment count. */
820 	pkt->cmd_dseg_count_l = 1;
821 
	/*
	 * Set total data segment count: the single command segment
	 * plus the response segments.
	 */
823 	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
824 	ddi_put16(ha->hba_buf.acc_handle, &pkt->total_dseg_count, seg_cnt);
825 
826 	/* Load ct cmd byte count. */
827 	ddi_put32(ha->hba_buf.acc_handle, &pkt->cmd_byte_count,
828 	    (uint32_t)sp->pkt->pkt_cmdlen);
829 
830 	/* Load ct rsp byte count. */
831 	ddi_put32(ha->hba_buf.acc_handle, &pkt->resp_byte_count,
832 	    (uint32_t)sp->pkt->pkt_rsplen);
833 
834 	/* Load MS command data segments. */
835 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
836 	cp = sp->pkt->pkt_cmd_cookie;
837 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
838 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
839 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
840 	seg_cnt--;
841 
842 	/* Load MS response entry data segments. */
843 	cp = sp->pkt->pkt_resp_cookie;
844 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
845 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
846 	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
847 	seg_cnt--;
848 	cp++;
849 
850 	/*
851 	 * Build continuation packets.
852 	 */
853 	if (seg_cnt) {
854 		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
855 	}
856 
857 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
858 }
859 
860 /*
861  * ql_ms_24xx_iocb
862  *	Setup of name/management server IOCB.
863  *
864  * Input:
865  *	ha:	adapter state pointer.
866  *	sp:	srb structure pointer.
867  *	arg:	request queue packet.
868  *
869  * Context:
870  *	Interrupt or Kernel context, no mailbox commands allowed.
871  */
872 void
873 ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
874 {
875 	ddi_dma_cookie_t	*cp;
876 	uint32_t		*ptr32;
877 	uint16_t		seg_cnt;
878 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
879 	ct_passthru_entry_t	*pkt = arg;
880 	ql_adapter_state_t	*pha = ha->pha;
881 
882 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
883 #if 0
884 	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
885 #endif
886 	/*
887 	 * Build command packet.
888 	 */
889 	pkt->entry_type = CT_PASSTHRU_TYPE;
890 
891 	/* Set loop ID */
892 	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);
893 
894 	pkt->vp_index = ha->vp_index;
895 
896 	/* Set ISP command timeout. */
897 	if (sp->isp_timeout < 0x1999) {
898 		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
899 		    sp->isp_timeout);
900 	}
901 
902 	/* Set cmd/response data segment counts. */
903 	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmd_dseg_count, 1);
904 	seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
905 	ddi_put16(pha->hba_buf.acc_handle, &pkt->resp_dseg_count, seg_cnt);
906 
907 	/* Load ct cmd byte count. */
908 	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmd_byte_count,
909 	    (uint32_t)sp->pkt->pkt_cmdlen);
910 
911 	/* Load ct rsp byte count. */
912 	ddi_put32(pha->hba_buf.acc_handle, &pkt->resp_byte_count,
913 	    (uint32_t)sp->pkt->pkt_rsplen);
914 
915 	/* Load MS command entry data segments. */
916 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
917 	cp = sp->pkt->pkt_cmd_cookie;
918 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
919 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
920 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
921 
922 	/* Load MS response entry data segments. */
923 	cp = sp->pkt->pkt_resp_cookie;
924 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
925 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
926 	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
927 	seg_cnt--;
928 	cp++;
929 
930 	/*
931 	 * Build continuation packets.
932 	 */
933 	if (seg_cnt) {
934 		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
935 	}
936 
937 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
938 }
939 
940 /*
941  * ql_ip_iocb
942  *	Setup of IP IOCB.
943  *
944  * Input:
945  *	ha:	adapter state pointer.
946  *	sp:	srb structure pointer.
947  *	arg:	request queue packet.
948  *
949  * Context:
950  *	Interrupt or Kernel context, no mailbox commands allowed.
951  */
952 void
953 ql_ip_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
954 {
955 	ddi_dma_cookie_t	*cp;
956 	uint32_t		*ptr32, cnt;
957 	uint16_t		seg_cnt;
958 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
959 	ip_entry_t		*pkt = arg;
960 
961 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
962 
963 	/* Set loop ID */
964 	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
965 		pkt->loop_id_l = LSB(tq->loop_id);
966 		pkt->loop_id_h = MSB(tq->loop_id);
967 	} else {
968 		pkt->loop_id_h = LSB(tq->loop_id);
969 	}
970 
971 	/* Set control flags */
972 	pkt->control_flags_l = BIT_6;
973 	if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
974 		pkt->control_flags_h = BIT_7;
975 	}
976 
977 	/* Set ISP command timeout. */
978 	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);
979 
980 	/* Set data segment count. */
981 	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
982 	/* Load total byte count. */
983 	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
984 	    (uint32_t)sp->pkt->pkt_cmdlen);
985 	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
986 
987 	/*
988 	 * Build command packet.
989 	 */
990 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
991 		pkt->entry_type = IP_A64_TYPE;
992 		cnt = IP_A64_DATA_SEGMENTS;
993 	} else {
994 		pkt->entry_type = IP_TYPE;
995 		cnt = IP_DATA_SEGMENTS;
996 	}
997 
998 	/* Load command entry data segments. */
999 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
1000 	cp = sp->pkt->pkt_cmd_cookie;
1001 	while (cnt && seg_cnt) {
1002 		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1003 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1004 			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
1005 			    cp->dmac_notused);
1006 		}
1007 		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
1008 		    (uint32_t)cp->dmac_size);
1009 		seg_cnt--;
1010 		cnt--;
1011 		cp++;
1012 	}
1013 
1014 	/*
1015 	 * Build continuation packets.
1016 	 */
1017 	if (seg_cnt) {
1018 		ql_continuation_iocb(ha, cp, seg_cnt,
1019 		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
1020 	}
1021 
1022 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1023 }
1024 
1025 /*
1026  * ql_ip_24xx_iocb
1027  *	Setup of IP IOCB for ISP24xx.
1028  *
1029  * Input:
1030  *	ha:	adapter state pointer.
1031  *	sp:	srb structure pointer.
1032  *	arg:	request queue packet.
1033  *
1034  * Context:
1035  *	Interrupt or Kernel context, no mailbox commands allowed.
1036  */
1037 void
1038 ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
1039 {
1040 	ddi_dma_cookie_t	*cp;
1041 	uint32_t		*ptr32;
1042 	uint16_t		seg_cnt;
1043 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
1044 	ip_cmd_entry_t		*pkt = arg;
1045 
1046 	pkt->entry_type = IP_CMD_TYPE;
1047 
1048 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1049 
1050 	/* Set N_port handle */
1051 	ddi_put16(ha->hba_buf.acc_handle, &pkt->hdl_status, tq->loop_id);
1052 
1053 	/* Set ISP command timeout. */
1054 	if (sp->isp_timeout < 0x1999) {
1055 		ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout_hdl,
1056 		    sp->isp_timeout);
1057 	}
1058 
1059 	/* Set data segment count. */
1060 	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
1061 	/* Load total byte count. */
1062 	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
1063 	    (uint32_t)sp->pkt->pkt_cmdlen);
1064 	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
1065 
1066 	/* Set control flags */
1067 	ddi_put16(ha->hba_buf.acc_handle, &pkt->control_flags,
1068 	    (uint16_t)(BIT_0));
1069 
1070 	/* Set frame header control flags */
1071 	ddi_put16(ha->hba_buf.acc_handle, &pkt->frame_hdr_cntrl_flgs,
1072 	    (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));
1073 
1074 	/* Load command data segment. */
1075 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
1076 	cp = sp->pkt->pkt_cmd_cookie;
1077 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1078 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
1079 	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
1080 	seg_cnt--;
1081 	cp++;
1082 
1083 	/*
1084 	 * Build continuation packets.
1085 	 */
1086 	if (seg_cnt) {
1087 		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
1088 	}
1089 
1090 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1091 }
1092 
1093 /*
1094  * ql_isp_rcvbuf
 *	Locates free buffers and places them on the receive buffer queue.
1096  *
1097  * Input:
1098  *	ha = adapter state pointer.
1099  *
1100  * Context:
1101  *	Interrupt or Kernel context, no mailbox commands allowed.
1102  */
1103 void
1104 ql_isp_rcvbuf(ql_adapter_state_t *ha)
1105 {
1106 	rcvbuf_t	*container;
1107 	int16_t		rcv_q_cnt;
1108 	uint16_t	index = 0;
1109 	uint16_t	index1 = 1;
1110 	int		debounce_count = QL_MAX_DEBOUNCE;
1111 	ql_srb_t	*sp;
1112 	fc_unsol_buf_t	*ubp;
1113 	int		ring_updated = FALSE;
1114 
1115 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1116 		ql_isp24xx_rcvbuf(ha);
1117 		return;
1118 	}
1119 
1120 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1121 
1122 	/* Acquire adapter state lock. */
1123 	ADAPTER_STATE_LOCK(ha);
1124 
1125 	/* Calculate number of free receive buffer entries. */
1126 	index = RD16_IO_REG(ha, mailbox[8]);
1127 	do {
1128 		index1 = RD16_IO_REG(ha, mailbox[8]);
1129 		if (index1 == index) {
1130 			break;
1131 		} else {
1132 			index = index1;
1133 		}
	} while (debounce_count--);
1135 
1136 	if (debounce_count < 0) {
1137 		/* This should never happen */
1138 		EL(ha, "max mb8 debounce retries exceeded\n");
1139 	}
1140 
1141 	rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ?
1142 	    index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT -
1143 	    (ha->rcvbuf_ring_index - index));
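
	/*
	 * As with the request ring, keep one container in reserve so
	 * that a full ring is not mistaken for an empty one.
	 */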
1144 
1145 	if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) {
1146 		rcv_q_cnt--;
1147 	}
1148 
1149 	/* Load all free buffers in ISP receive buffer ring. */
1150 	index = 0;
1151 	while (rcv_q_cnt >= (uint16_t)0 && index < QL_UB_LIMIT) {
1152 		/* Locate a buffer to give. */
1153 		QL_UB_LOCK(ha);
1154 		while (index < QL_UB_LIMIT) {
1155 			ubp = ha->ub_array[index];
1156 			if (ubp != NULL) {
1157 				sp = ubp->ub_fca_private;
1158 				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1159 				    (ha->flags & IP_INITIALIZED) &&
1160 				    (sp->flags & SRB_UB_IN_FCA) &&
1161 				    (!(sp->flags & (SRB_UB_IN_ISP |
1162 				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1163 				    SRB_UB_ACQUIRED)))) {
1164 					sp->flags |= SRB_UB_IN_ISP;
1165 					break;
1166 				}
1167 			}
1168 			index++;
1169 		}
1170 
1171 		if (index < QL_UB_LIMIT) {
1172 			rcv_q_cnt--;
1173 			index++;
1174 			container = ha->rcvbuf_ring_ptr;
1175 
1176 			/*
1177 			 * Build container.
1178 			 */
1179 			ddi_put32(ha->hba_buf.acc_handle,
1180 			    (uint32_t *)(void *)&container->bufp[0],
1181 			    sp->ub_buffer.cookie.dmac_address);
1182 
1183 			ddi_put32(ha->hba_buf.acc_handle,
1184 			    (uint32_t *)(void *)&container->bufp[1],
1185 			    sp->ub_buffer.cookie.dmac_notused);
1186 
1187 			ddi_put16(ha->hba_buf.acc_handle, &container->handle,
1188 			    LSW(sp->handle));
1189 
1190 			ha->ub_outcnt++;
1191 
1192 			/* Adjust ring index. */
1193 			ha->rcvbuf_ring_index++;
1194 			if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) {
1195 				ha->rcvbuf_ring_index = 0;
1196 				ha->rcvbuf_ring_ptr = ha->rcvbuf_ring_bp;
1197 			} else {
1198 				ha->rcvbuf_ring_ptr++;
1199 			}
1200 
1201 			ring_updated = TRUE;
1202 		}
1203 		QL_UB_UNLOCK(ha);
1204 	}
1205 
1206 	if (ring_updated) {
1207 		/* Sync queue. */
1208 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1209 		    (off_t)RCVBUF_Q_BUFFER_OFFSET, (size_t)RCVBUF_QUEUE_SIZE,
1210 		    DDI_DMA_SYNC_FORDEV);
1211 
1212 		/* Set chip new ring index. */
1213 		WRT16_IO_REG(ha, mailbox[8], ha->rcvbuf_ring_index);
1214 	}
1215 
1216 	/* Release adapter state lock. */
1217 	ADAPTER_STATE_UNLOCK(ha);
1218 
1219 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1220 }
1221 
1222 /*
1223  * ql_isp24xx_rcvbuf
 *	Locates free buffers and sends them to the adapter.
1225  *
1226  * Input:
1227  *	ha = adapter state pointer.
1228  *
1229  * Context:
1230  *	Interrupt or Kernel context, no mailbox commands allowed.
1231  */
1232 static void
1233 ql_isp24xx_rcvbuf(ql_adapter_state_t *ha)
1234 {
1235 	rcvbuf_t		*container;
1236 	uint16_t		index;
1237 	ql_srb_t		*sp;
1238 	fc_unsol_buf_t		*ubp;
1239 	int			rval;
1240 	ip_buf_pool_entry_t	*pkt = NULL;
1241 
1242 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
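
	/*
	 * Free unsolicited buffers are handed to the firmware in IP
	 * buffer pool IOCBs, up to IP_POOL_BUFFERS containers per
	 * entry; a partially filled entry is submitted once no more
	 * buffers remain.
	 */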
1243 
1244 	for (;;) {
1245 		/* Locate a buffer to give. */
1246 		QL_UB_LOCK(ha);
1247 		for (index = 0; index < QL_UB_LIMIT; index++) {
1248 			ubp = ha->ub_array[index];
1249 			if (ubp != NULL) {
1250 				sp = ubp->ub_fca_private;
1251 				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1252 				    (ha->flags & IP_INITIALIZED) &&
1253 				    (sp->flags & SRB_UB_IN_FCA) &&
1254 				    (!(sp->flags & (SRB_UB_IN_ISP |
1255 				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1256 				    SRB_UB_ACQUIRED)))) {
1257 					ha->ub_outcnt++;
1258 					sp->flags |= SRB_UB_IN_ISP;
1259 					break;
1260 				}
1261 			}
1262 		}
1263 		QL_UB_UNLOCK(ha);
1264 		if (index == QL_UB_LIMIT) {
1265 			break;
1266 		}
1267 
1268 		/* Get IOCB packet for buffers. */
1269 		if (pkt == NULL) {
1270 			rval = ql_req_pkt(ha, (request_t **)&pkt);
1271 			if (rval != QL_SUCCESS) {
1272 				EL(ha, "failed, ql_req_pkt=%x\n", rval);
1273 				QL_UB_LOCK(ha);
1274 				ha->ub_outcnt--;
1275 				sp->flags &= ~SRB_UB_IN_ISP;
1276 				QL_UB_UNLOCK(ha);
1277 				break;
1278 			}
1279 			pkt->entry_type = IP_BUF_POOL_TYPE;
1280 			container = &pkt->buffers[0];
1281 		}
1282 
1283 		/*
1284 		 * Build container.
1285 		 */
1286 		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[0],
1287 		    sp->ub_buffer.cookie.dmac_address);
1288 		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[1],
1289 		    sp->ub_buffer.cookie.dmac_notused);
1290 		ddi_put16(ha->hba_buf.acc_handle, &container->handle,
1291 		    LSW(sp->handle));
1292 
1293 		pkt->buffer_count++;
1294 		container++;
1295 
1296 		if (pkt->buffer_count == IP_POOL_BUFFERS) {
1297 			ql_isp_cmd(ha);
1298 			pkt = NULL;
1299 		}
1300 	}
1301 
1302 	if (pkt != NULL) {
1303 		ql_isp_cmd(ha);
1304 	}
1305 
1306 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1307 }
1308 
1309 /*
1310  * ql_modify_lun
 *	Function enables, modifies, or disables the ISP's ability to
 *	respond as a target.
1312  *
1313  * Input:
 *	ha = adapter state pointer.
1316  *
1317  * Returns:
1318  *	ql local function return status code.
1319  *
1320  * Context:
1321  *	Interrupt or Kernel context, no mailbox commands allowed.
1322  */
1323 int
1324 ql_modify_lun(ql_adapter_state_t *ha)
1325 {
1326 	enable_lun_entry_t	*pkt;
1327 	int			rval = QL_SUCCESS;
1328 	uint32_t		index, ubcount;
1329 	fc_unsol_buf_t		*ubp;
1330 
1331 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1332 
1333 	/*
1334 	 * Count the number of SCSI unsolicited buffers, that have been
1335 	 * allocated.
1336 	 */
1337 	ADAPTER_STATE_LOCK(ha);
1338 
1339 	ubp = NULL;
1340 	ubcount = 0;
1341 	QL_UB_LOCK(ha);
1342 	for (index = 0; index < QL_UB_LIMIT; index++) {
1343 		ubp = ha->ub_array[index];
1344 		if (ubp != NULL) {
1345 			ql_srb_t *sp = ubp->ub_fca_private;
1346 
1347 			if (sp->ub_type == FC_TYPE_SCSI_FCP &&
1348 			    !(sp->flags & SRB_UB_FREE_REQUESTED)) {
1349 				ubcount++;
1350 			}
1351 		}
1352 	}
1353 	QL_UB_UNLOCK(ha);
1354 
	if (!(ha->flags & TARGET_MODE_INITIALIZED) && (ubcount == 0)) {
		ADAPTER_STATE_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return (rval);
	}
1359 
	rval = ql_req_pkt(ha, (request_t **)&pkt);
	if (rval != QL_SUCCESS) {
		ADAPTER_STATE_UNLOCK(ha);
		EL(ha, "failed, ql_req_pkt=%xh\n", rval);
		return (rval);
	}

1362 	if (ha->flags & TARGET_MODE_INITIALIZED) {
1363 		if (ubcount == 0) {
1364 			/* Disable the target mode Luns */
1365 			ASSERT(ha->ub_command_count != 0);
1366 			ASSERT(ha->ub_notify_count != 0);
1367 
1368 			ha->flags &= ~(TARGET_MODE_INITIALIZED);
1369 
1370 			ha->ub_command_count = 0;
1371 			ha->ub_notify_count = 0;
1372 
1373 			pkt->entry_type = ENABLE_LUN_TYPE;
1374 			pkt->command_count = 0;
1375 			pkt->immediate_notify_count = 0;
1376 
1377 		} else {
1378 			/* Modify the command count for target mode */
1379 			modify_lun_entry_t	*ml_pkt;
1380 			uint8_t			cmd_count, notify_count;
1381 
1382 			ASSERT(ha->ub_command_count != 0);
1383 			ASSERT(ha->ub_notify_count != 0);
1384 
1385 			/*
1386 			 * calculate the new value of command count
1387 			 * and notify count and then issue the command
1388 			 * to change the values in the firmware.
1389 			 */
1390 			ml_pkt = (modify_lun_entry_t *)pkt;
1391 			ml_pkt->entry_type = MODIFY_LUN_TYPE;
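			/*
			 * The command and notify count fields
			 * are single bytes, so both values are
			 * capped at 255.
			 */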
1392 			if (ubcount < 255) {
1393 				/* Save one for immediate notify. */
1394 				if (ubcount > 1) {
1395 					cmd_count = (uint8_t)(ubcount - 1);
1396 				} else {
1397 					cmd_count = (uint8_t)ubcount;
1398 				}
1399 				notify_count = 1;
1400 			} else {
1401 				cmd_count = 255;
1402 				if (ubcount - 255 < 255) {
1403 					notify_count = (uint8_t)
1404 					    (ubcount - 255);
1405 				} else {
1406 					notify_count = 255;
1407 				}
1408 			}
1409 
1410 			if (cmd_count > ha->ub_command_count) {
1411 				/* cmd_count value increased */
1412 				ml_pkt->command_count =	(uint8_t)
1413 				    (cmd_count - ha->ub_command_count);
1414 				ml_pkt->operators = (uint8_t)
1415 				    (ml_pkt->operators | BIT_0);
1416 
1417 				if (notify_count > ha->ub_notify_count) {
1418 					ml_pkt->immediate_notify_count =
1419 					    (uint8_t)(notify_count -
1420 					    ha->ub_notify_count);
1421 					ml_pkt->operators = (uint8_t)
1422 					    (ml_pkt->operators | BIT_2);
1423 				} else if (notify_count <
1424 				    ha->ub_notify_count) {
1425 					ml_pkt->immediate_notify_count =
1426 					    (uint8_t)(ha->ub_notify_count -
1427 					    notify_count);
1428 					ml_pkt->operators = (uint8_t)
1429 					    (ml_pkt->operators | BIT_3);
1430 				}
1431 			} else {
1432 				/* cmd_count value reduced */
1433 				ml_pkt->command_count =	(uint8_t)
1434 				    (ha->ub_command_count - cmd_count);
1435 				if (ml_pkt->command_count != 0) {
1436 					ml_pkt->operators = (uint8_t)
1437 					    (ml_pkt->operators | BIT_1);
1438 				}
1439 				if (notify_count > ha->ub_notify_count) {
1440 					ml_pkt->immediate_notify_count =
1441 					    (uint8_t)(notify_count -
1442 					    ha->ub_notify_count);
1443 					ml_pkt->operators = (uint8_t)
1444 					    (ml_pkt->operators | BIT_2);
1445 				} else if (notify_count <
1446 				    ha->ub_notify_count) {
1447 					ml_pkt->immediate_notify_count =
1448 					    (uint8_t)(ha->ub_notify_count -
1449 					    notify_count);
1450 					ml_pkt->operators = (uint8_t)
1451 					    (ml_pkt->operators | BIT_3);
1452 				}
1453 			}
1454 
1455 			/* Update the driver's command/notify count values */
1456 			ha->ub_command_count = cmd_count;
1457 			ha->ub_notify_count = notify_count;
1458 		}
1459 	} else {
1460 		ASSERT(ubcount != 0);
1461 
1462 		/* Enable the Luns for the target mode */
1463 		pkt->entry_type = ENABLE_LUN_TYPE;
1464 
1465 		if (ubcount < 255) {
1466 			/* Save one for immediate notify. */
1467 			if (ubcount > 1) {
1468 				ha->ub_command_count = (uint8_t)(ubcount - 1);
1469 			} else {
1470 				ha->ub_command_count = (uint8_t)ubcount;
1471 			}
1472 			ha->ub_notify_count = 1;
1473 		} else {
1474 			ha->ub_command_count = 255;
1475 			if (ubcount - 255 < 255) {
1476 				ha->ub_notify_count = (uint8_t)(ubcount - 255);
1477 			} else {
1478 				ha->ub_notify_count = 255;
1479 			}
1480 		}
1481 		ha->flags |= TARGET_MODE_INITIALIZED;
1482 
1483 		pkt->command_count = ha->ub_command_count;
1484 		pkt->immediate_notify_count = ha->ub_notify_count;
1485 	}
1486 	ADAPTER_STATE_UNLOCK(ha);
1487 
1488 	/* Issue command to ISP */
1489 	ql_isp_cmd(ha);
1490 
1491 	if (rval != QL_SUCCESS) {
1492 		EL(ha, "failed=%xh\n", rval);
1493 	} else {
1494 		/*EMPTY*/
1495 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1496 	}
1497 	return (rval);
1498 }
1499 
1500 /*
1501  * ql_notify_acknowledge_iocb
1502  *	Setup of notify acknowledge IOCB for pending
1503  *	immediate notify entry.
1504  *
1505  * Input:
1506  *	ha:	adapter state pointer.
1507  *	cmd:	target command context pointer.
1508  *	pkt:	request queue packet.
1509  *
1510  * Context:
1511  *	Interrupt or Kernel context, no mailbox commands allowed.
1512  */
1513 void
1514 ql_notify_acknowledge_iocb(ql_adapter_state_t *ha, tgt_cmd_t *cmd,
1515     notify_acknowledge_entry_t *pkt)
1516 {
1517 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1518 
1519 	pkt->entry_type = NOTIFY_ACKNOWLEDGE_TYPE;
1520 	pkt->initiator_id_l = cmd->initiator_id_l;
1521 	pkt->initiator_id_h = cmd->initiator_id_h;
1522 
1523 	/* Handle LIP reset event. */
1524 	if (cmd->status == 0xe) {
1525 		pkt->flags_l = BIT_5;
1526 	}
1527 
1528 	pkt->flags_h = BIT_0;
1529 	ddi_put16(ha->hba_buf.acc_handle, &pkt->status, cmd->status);
1530 	pkt->task_flags_l = cmd->task_flags_l;
1531 	pkt->task_flags_h = cmd->task_flags_h;
1532 	pkt->sequence_id = cmd->rx_id;
1533 
1534 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1535 }
1536 
1537 /*
1538  * ql_continue_target_io_iocb
1539  *	Setup of continue target I/O IOCB for pending
1540  *	accept target I/O entry.
1541  *
1542  * Input:
1543  *	ha = adapter state pointer.
1544  *	sp = srb structure pointer.
1545  *	arg = request queue packet.
1546  *
1547  * Context:
1548  *	Interrupt or Kernel context, no mailbox commands allowed.
1549  */
1550 void
1551 ql_continue_target_io_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
1552 {
1553 	ddi_dma_cookie_t	*cp;
1554 	port_id_t		d_id;
1555 	ql_tgt_t		*tq;
1556 	ctio_entry_t		*pkt = arg;
1557 
1558 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1559 
1560 	d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
1561 	tq = ql_d_id_to_queue(ha, d_id);
1562 
1563 	if (tq == NULL) {
		EL(ha, "Unknown Initiator d_id %xh\n", d_id.b24);
1565 		return;
1566 	}
1567 
1568 	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1569 		pkt->initiator_id_l = LSB(tq->loop_id);
1570 		pkt->initiator_id_h = MSB(tq->loop_id);
1571 	} else {
1572 		pkt->initiator_id_h = LSB(tq->loop_id);
1573 	}
1574 	pkt->rx_id = sp->pkt->pkt_cmd_fhdr.rx_id;
1575 
1576 	/* Set ISP command timeout. */
1577 	if (sp->isp_timeout < 0x1999) {
1578 		ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout,
1579 		    sp->isp_timeout);
1580 	}
1581 
1582 	if (sp->flags & SRB_FCP_DATA_PKT) {
1583 
1584 		if (sp->pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1585 			pkt->flags_l = BIT_6;
1586 		} else if (sp->pkt->pkt_tran_type == FC_PKT_INBOUND) {
1587 			pkt->flags_l = BIT_7;
1588 		}
1589 
1590 		pkt->flags_h = BIT_1;
1591 		/* Set relative offset. */
1592 		ddi_put32(ha->hba_buf.acc_handle,
1593 		    (uint32_t *)(void *)&pkt->relative_offset,
1594 		    (uint32_t)sp->pkt->pkt_cmd_fhdr.ro);
1595 	} else {
1596 		/* (sp->flags & SRB_FCP_RSP_PKT) */
1597 		pkt->flags_l = BIT_7 | BIT_6 | BIT_1;
1598 		pkt->flags_h = BIT_7 | BIT_1;
1599 	}
1600 
1601 	/*
1602 	 * Load data segments.
1603 	 */
1604 	if (sp->pkt->pkt_cmdlen != 0) {
1605 		cp = sp->pkt->pkt_cmd_cookie;
1606 
1607 		/* Transfer length. */
1608 		ddi_put32(ha->hba_buf.acc_handle,
1609 		    (uint32_t *)(void *)&pkt->type.s0_32bit.byte_count,
1610 		    (uint32_t)cp->dmac_size);
1611 
1612 		/* Load data segments. */
1613 		pkt->dseg_count_l = 1;
1614 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1615 			pkt->entry_type = CTIO_TYPE_3;
1616 			ddi_put32(ha->hba_buf.acc_handle,
1617 			    (uint32_t *)(void *)
1618 			    &pkt->type.s0_64bit.dseg_0_address[0],
1619 			    cp->dmac_address);
1620 			ddi_put32(ha->hba_buf.acc_handle,
1621 			    (uint32_t *)(void *)
1622 			    &pkt->type.s0_64bit.dseg_0_address[1],
1623 			    cp->dmac_notused);
1624 			ddi_put32(ha->hba_buf.acc_handle,
1625 			    (uint32_t *)(void *)
1626 			    &pkt->type.s0_64bit.dseg_0_length,
1627 			    (uint32_t)cp->dmac_size);
1628 		} else {
1629 			pkt->entry_type = CTIO_TYPE_2;
1630 			ddi_put32(ha->hba_buf.acc_handle,
1631 			    (uint32_t *)(void *)
1632 			    &pkt->type.s0_32bit.dseg_0_address,
1633 			    cp->dmac_address);
1634 			ddi_put32(ha->hba_buf.acc_handle,
1635 			    (uint32_t *)(void *)
1636 			    &pkt->type.s0_32bit.dseg_0_length,
1637 			    (uint32_t)cp->dmac_size);
1638 		}
1639 	}
1640 
1641 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1642 }
1643 
1644 /*
1645  * ql_continue_target_io_2400_iocb
1646  *	Setup of continue target I/O IOCB for pending
1647  *	accept target I/O entry.
1648  *
1649  * Input:
1650  *	ha = adapter state pointer.
1651  *	sp = srb structure pointer.
1652  *	arg = request queue packet.
1653  *
1654  * Context:
1655  *	Interrupt or Kernel context, no mailbox commands allowed.
1656  */
1657 /* ARGSUSED */
1658 void
1659 ql_continue_target_io_2400_iocb(ql_adapter_state_t *ha, ql_srb_t *sp,
1660     void *arg)
1661 {
1662 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1663 
1664 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1665 }
1666