/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Emulex.  All rights reserved.
 * Use is subject to License terms.
 */


#include "emlxs.h"

/* Timer periods in seconds */
#define	EMLXS_TIMER_PERIOD		1	/* secs */
#define	EMLXS_PKT_PERIOD		5	/* secs */
#define	EMLXS_UB_PERIOD			60	/* secs */

EMLXS_MSG_DEF(EMLXS_CLOCK_C);


#ifdef DFC_SUPPORT
static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
#endif	/* DFC_SUPPORT */

#ifdef DHCHAP_SUPPORT
static void emlxs_timer_check_dhchap(emlxs_port_t *port);
#endif	/* DHCHAP_SUPPORT */

static void emlxs_timer(void *arg);
static void emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
static uint32_t emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
static void emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
static void emlxs_timer_check_linkup(emlxs_hba_t *hba);
static void emlxs_timer_check_mbox(emlxs_hba_t *hba);
static void emlxs_timer_check_discovery(emlxs_port_t *port);
static void emlxs_timer_check_ub(emlxs_port_t *port);
static void emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag);
static uint32_t emlxs_pkt_chip_timeout(emlxs_port_t *port,
	emlxs_buf_t *sbp, Q *abortq, uint8_t *flag);

#ifdef TX_WATCHDOG
static void emlxs_tx_watchdog(emlxs_hba_t *hba);
#endif	/* TX_WATCHDOG */

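/*
 * Convert a timeout in seconds into an absolute LBOLT value.
 * Returns -1 (no timeout) when driver timeouts are disabled by the
 * CFG_TIMEOUT_ENABLE config parameter.
 */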
extern clock_t
emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
{
	emlxs_config_t *cfg = &CFG;
	clock_t time;

	/* Set thread timeout */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		(void) drv_getparm(LBOLT, &time);
		time += (timeout * drv_usectohz(1000000));
	} else {
		time = -1;
	}

	return (time);

} /* emlxs_timeout() */

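/*
 * Main timer callout. Fires every EMLXS_TIMER_PERIOD second(s) and
 * dispatches the individual timeout checks below (mailbox, heartbeat,
 * packets, linkup, per-port nodes/discovery/UB, rings). The BUSY flag
 * guarantees a single active timer thread; the KILL flag requests
 * termination, which is acknowledged by setting ENDED.
 */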
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_RINGS];
	uint32_t i;
	uint32_t rc;

	if (!hba->timer_id) {
		return;
	}
	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}
	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}
	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		goto done;
	}
	bzero((void *)flag, sizeof (flag));

	/* Check for mailbox timeout */
	emlxs_timer_check_mbox(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif	/* IDLE_TIMER */

#ifdef DFC_SUPPORT
	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);
#endif	/* DFC_SUPPORT */

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset */
		goto done;
	}
	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}
		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif	/* DHCHAP_SUPPORT */

	}

	/* Check for ring service timeouts */
	/* Always do this last */
	emlxs_timer_check_rings(hba, flag);

done:

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id = timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */

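/*
 * Arm the timer callout if it is not already running. timer_id is
 * checked both outside and inside EMLXS_TIMER_LOCK so the common
 * already-started case avoids taking the lock.
 */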
extern void
emlxs_timer_start(emlxs_hba_t *hba)
{
	if (hba->timer_id) {
		return;
	}
	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);
	if (!hba->timer_id) {
		hba->timer_flags = 0;
		hba->timer_id = timeout(emlxs_timer, (void *)hba,
		    drv_usectohz(1000000));
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

} /* emlxs_timer_start() */

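/*
 * Request timer termination and block until the callout acknowledges
 * by clearing timer_id, polling in 500ms steps with the lock dropped
 * so the timer thread can run.
 */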
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	if (!hba->timer_id) {
		return;
	}
	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer_stop() */

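/*
 * Scan for timed-out packets in three passes:
 *   1. Walk each node's priority and normal tx queues (under the
 *	RINGTX lock), moving expired iocbs to a local timeout queue.
 *   2. Complete those iocbs outside the locks with an abort or
 *	link-down status.
 *   3. Walk the chip's outstanding-I/O table (fc_table) and escalate
 *	expired packets via emlxs_pkt_chip_timeout().
 * Returns nonzero when a link reset (1) or adapter reset (2) has been
 * initiated, in which case the caller should skip further checks.
 */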
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	/* emlxs_port_t *vport; */
	emlxs_config_t *cfg = &CFG;
	Q tmo;
	int32_t ringno;
	RING *rp;
	NODELIST *nlp;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCBQ *iocbq;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt;
	Q abort;
	uint32_t iotag;
	uint32_t rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return (0);
	}
	if (hba->pkt_timer > hba->timer_tics) {
		return (0);
	}
	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


	bzero((void *)&tmo, sizeof (Q));

	/*
	 * We must hold the locks here because we never know when an iocb
	 * will be removed out from under us
	 */

	mutex_enter(&EMLXS_RINGTX_LOCK);

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		/* Scan the tx queues for each active node on the ring */

		/* Get the first node */
		nlp = (NODELIST *)rp->nodeq.q_first;

		while (nlp) {
			/* Scan the node's priority tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_ptx[ringno].q_last =
						    (uint8_t *)prev;
					}
					if (prev == NULL) {
						nlp->nlp_ptx[ringno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_ptx[ringno].q_cnt--;

					/*
					 * Add this iocb to our local timeout
					 * Q so we don't hold the RINGTX
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first = (uint8_t *)iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */

			/* Scan the node's tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_tx[ringno].q_last =
						    (uint8_t *)prev;
					}
					if (prev == NULL) {
						nlp->nlp_tx[ringno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_tx[ringno].q_cnt--;

					/*
					 * Add this iocb to our local timeout
					 * Q so we don't hold the RINGTX
					 * lock too long
					 */

					/*
					 * EMLXS_MSGF(EMLXS_CONTEXT,
					 * &emlxs_pkt_timeout_msg, "TXQ
					 * abort: Removing iotag=%x qcnt=%d
					 * pqcnt=%d", sbp->iotag,
					 * nlp->nlp_tx[ringno].q_cnt,
					 * nlp->nlp_ptx[ringno].q_cnt);
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first = (uint8_t *)iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */

			if (nlp == (NODELIST *)rp->nodeq.q_last) {
				nlp = NULL;
			} else {
				nlp = nlp->nlp_next[ringno];
			}

		}	/* while(nlp) */

	}	/* end of for */
	/* Now clean up the iocbs */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
		ringno = ((RING *)iocbq->ring)->ringno;

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				hba->ring_tx_count[ringno]--;
			}
			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
			mutex_exit(&sbp->mtx);
		}
		iocbq = (IOCBQ *)iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	/* Now complete the transmit timeouts outside the locks */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/*
			 * Warning: Some FCT sbp's don't have fc_packet
			 * objects
			 */
			pkt = PRIV2PKT(sbp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_TIMEOUT, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		}
		iocbq = next;

	}	/* end of while */


	/* Now check the chip */
	bzero((void *)&abort, sizeof (Q));

	/* Check the rings */
	rc = 0;
	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (hba->timer_tics >= sbp->ticks)) {
				rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
				    sbp, &abort, flag);

				if (rc) {
					break;
				}
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));

		if (rc) {
			break;
		}
	}

	/* Now put the iocbs on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	if (rc == 1) {
		/* Spawn a thread to reset the link */
		(void) thread_create(NULL, 0, emlxs_reset_link_thread,
		    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
	} else if (rc == 2) {
		/* Spawn a thread to reset the adapter */
		(void) thread_create(NULL, 0, emlxs_restart_thread,
		    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
	}
	return (rc);

} /* emlxs_timer_check_pkts() */


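/*
 * Watch for rings that still have queued work after their timeout has
 * expired. Such rings are flagged so iocb servicing is requested below,
 * their timeout is rearmed, and a watchdog message is logged while the
 * link is up.
 */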
static void
emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t ringno;
	RING *rp;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		/* Check for ring timeout now */
		mutex_enter(&EMLXS_RINGTX_LOCK);
		if (rp->timeout && (hba->timer_tics >= rp->timeout)) {
			/*
			 * Check if there is still work to do on the
			 * ring and the link is still up
			 */
			if (rp->nodeq.q_first) {
				flag[ringno] = 1;
				rp->timeout = hba->timer_tics + 10;

				if (hba->state >= FC_LINK_UP) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_ring_watchdog_msg,
					    "%s host=%d port=%d cnt=%d,%d",
					    emlxs_ring_xlate(ringno),
					    rp->fc_cmdidx,
					    rp->fc_port_cmdidx,
					    hba->ring_tx_count[ringno],
					    hba->io_count[ringno]);
				}
			} else {
				rp->timeout = 0;
			}
		}
		mutex_exit(&EMLXS_RINGTX_LOCK);

		/*
		 * If ring flag is set, request iocb servicing here to send
		 * any iocb's that may still be queued
		 */
		if (flag[ringno]) {
			emlxs_issue_iocb_cmd(hba, rp, 0);
		}
	}

	return;

} /* emlxs_timer_check_rings() */

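/*
 * Reopen any node/ring gates whose timers have expired. The node table
 * is rescanned from the top after each emlxs_node_open() call because
 * node_rwlock must be dropped before opening a node.
 */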
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t ringno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold
		 * the lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				for (ringno = 0;
				    ringno < hba->ring_count;
				    ringno++) {
					/*
					 * Check if the node timer is active
					 * and if timer has expired
					 */
					if ((nlp->nlp_flag[ringno] &
					    NLP_TIMER) &&
					    nlp->nlp_tics[ringno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[ringno])) {
						/*
						 * If so, set the flag and
						 * break out
						 */
						found = 1;
						flag[ringno] = 1;
						break;
					}
				}

				if (found) {
					break;
				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}
		emlxs_node_open(port, nlp, ringno);
	}

} /* emlxs_timer_check_nodes() */


#ifdef DFC_SUPPORT
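/*
 * If the loopback timer has expired while the adapter is still in
 * loopback mode, reset the link to restore normal operation.
 */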
static void
emlxs_timer_check_loopback(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t reset = 0;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	/* Check the loopback timer for expiration */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (!hba->loopback_tics ||
	    (hba->timer_tics < hba->loopback_tics)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->loopback_tics = 0;

	if (hba->flag & FC_LOOPBACK_MODE) {
		reset = 1;
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	if (reset) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
		    "LOOPBACK_MODE: Expired. Resetting...");
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
	}
	return;

} /* emlxs_timer_check_loopback() */
#endif	/* DFC_SUPPORT */

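/*
 * When the linkup stabilization timer expires and the link is still
 * ready, report the port online to the upper layers.
 */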
static void
emlxs_timer_check_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t linkup;

	/* Check the linkup timer for expiration */
	mutex_enter(&EMLXS_PORT_LOCK);
	linkup = 0;
	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
		hba->linkup_timer = 0;

		/* Make sure link is still ready */
		if (hba->state >= FC_LINK_UP) {
			linkup = 1;
		}
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Make the linkup callback */
	if (linkup) {
		emlxs_port_online(port);
	}
	return;

} /* emlxs_timer_check_linkup() */

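/*
 * Every 5 seconds, check whether the adapter has raised any interrupt
 * since the last check. If it has been completely silent and no
 * mailbox or heartbeat is already outstanding, issue a HEARTBEAT
 * mailbox command to verify that the adapter is still responsive.
 */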
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *mb;
	emlxs_config_t *cfg = &CFG;

	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
		return;
	}
	if (hba->timer_tics < hba->heartbeat_timer) {
		return;
	}
	hba->heartbeat_timer = hba->timer_tics + 5;

	/* Return if adapter interrupts have occurred */
	if (hba->heartbeat_flag) {
		hba->heartbeat_flag = 0;
		return;
	}
	/* No adapter interrupts have occurred for 5 seconds now */

	/* Return if mailbox is busy */
	/* This means the mailbox timer routine is watching for problems */
	if (hba->mbox_timer) {
		return;
	}
	/* Return if heartbeat is still outstanding */
	if (hba->heartbeat_active) {
		return;
	}
	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to allocate heartbeat mailbox.");
		return;
	}
	emlxs_mb_heartbeat(hba, mb);
	hba->heartbeat_active = 1;

	if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
	}
	return;

} /* emlxs_timer_check_heartbeat() */


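/*
 * Handle a mailbox command that has exceeded its timeout. First check
 * for a pending error attention, then for a completion whose interrupt
 * was missed (and force the event if so). Otherwise declare a mailbox
 * timeout: mark the HBA in error, wake any waiters, and spawn a
 * shutdown thread.
 */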
static void
emlxs_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->mbox_timer = 0;

	/* Mailbox timed out, first check for error attention */
	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));

	if (ha_copy & HA_ERATT) {
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}
	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset = (off_t)((uint64_t)(unsigned long)mb -
			    (uint64_t)(unsigned long)hba->slim2.virt);

			emlxs_mpdata_sync(hba->slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = PCIMEM_LONG(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
		}

		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/*
			 * Read host attention register to determine
			 * interrupt source
			 */
			ha_copy = READ_CSR_REG(hba,
			    FC_HA_REG(hba, hba->csr_addr));

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. "
			    "Forcing event. hc = %x ha = %x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}
	switch (hba->mbox_queue_flag) {
	case MBX_NOWAIT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout: %s: Nowait.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand));
		break;

	case MBX_SLEEP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout: %s: mb=%p Sleep.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		break;

	case MBX_POLL:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout: %s: mb=%p Polled.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout.");
		break;
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	emlxs_ffstate_change_locked(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	(void) thread_create(NULL, 0, emlxs_shutdown_thread, (char *)hba, 0,
	    &p0, TS_RUN, v.v_maxsyspri - 2);

	return;

} /* emlxs_timer_check_mbox() */


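/*
 * Handle the tape (FCP-2) discovery timeout. While the discovery timer
 * has expired with the link up, flush any FCP-2 nodes still closed on
 * the FCP ring, then issue CLEAR_LA to resume normal link activity.
 */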
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[FC_FCP_RING] &
				    NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			emlxs_ffstate_change_locked(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, (MAILBOX *)mbox);

			if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mbox,
			    MBX_NOWAIT, 0) != MBX_BUSY) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}
	return;

} /* emlxs_timer_check_discovery() */

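/*
 * Every EMLXS_UB_PERIOD, scan the unsolicited buffer pools for buffers
 * that have been in use longer than their timeout. Stale buffers are
 * logged and their timeout extended (5 -> 10 -> 30 minutes, doubling
 * thereafter) to throttle repeat messages.
 */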
static void
emlxs_timer_check_ub(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *ulistp;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;

	if (port->ub_timer > hba->timer_tics) {
		return;
	}
	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;

	/* Check the unsolicited buffers */
	mutex_enter(&EMLXS_UB_LOCK);

	ulistp = port->ub_pool;
	while (ulistp) {
		/* Check buffers in this pool */
		for (i = 0; i < ulistp->pool_nentries; i++) {
			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
			ub_priv = ubp->ub_fca_private;

			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
				continue;
			}
			/*
			 * If buffer has timed out, print message and
			 * increase timeout
			 */
			if ((ub_priv->time + ub_priv->timeout) <=
			    hba->timer_tics) {
				ub_priv->flags |= EMLXS_UB_TIMEOUT;

				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
				    "Stale UB buffer detected (%d mins): "
				    "buffer = %p (%x,%x,%x,%x)",
				    (ub_priv->timeout / 60),
				    ubp, ubp->ub_frame.type,
				    ubp->ub_frame.s_id,
				    ubp->ub_frame.ox_id,
				    ubp->ub_frame.rx_id);

				/* Increase timeout period */

				/*
				 * If timeout was 5 mins or less, increase it
				 * to 10 mins
				 */
				if (ub_priv->timeout <= (5 * 60)) {
					ub_priv->timeout = (10 * 60);
				}
				/*
				 * If timeout was 10 mins or less, increase
				 * it to 30 mins
				 */
				else if (ub_priv->timeout <= (10 * 60)) {
					ub_priv->timeout = (30 * 60);
				}
				/* Otherwise double it. */
				else {
					ub_priv->timeout *= 2;
				}
			}
		}

		ulistp = ulistp->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return;

} /* emlxs_timer_check_ub() */

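/*
 * Escalation ladder for a packet that has timed out on the chip,
 * keyed off sbp->abort_attempts:
 *	0:  issue an ABTS abort (or a close if the link is down)
 *	1:  issue a close
 *	2:  request a link reset (return 1)
 *	3+: request an adapter reset (return 2)
 * Any abort/close iocb created here is appended to the caller's
 * abortq for transmission after the locks are dropped.
 */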
/* EMLXS_FCTAB_LOCK must be held to call this */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
	Q *abortq, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	RING *rp = (RING *)sbp->ring;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Aborting. sbp=%p iotag=%x tmo=%d",
			    sbp, sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			iocbq = emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);

			/*
			 * The adapter will make 2 attempts to send ABTS with
			 * 2*ratov timeout each time
			 */
			sbp->ticks = hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Closing. sbp=%p iotag=%x tmo=%d",
			    sbp, sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			iocbq = emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, rp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 1:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x",
		    sbp, sbp->iotag);

		iocbq = emlxs_create_close_xri_cn(port, sbp->node,
		    sbp->iotag, rp);

		sbp->ticks = hba->timer_tics + 30;

		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 2:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x",
		    sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}
	return (rc);

} /* emlxs_pkt_chip_timeout() */


#ifdef TX_WATCHDOG

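/*
 * Debug-only consistency check: verify that every packet marked
 * PACKET_IN_TXQ can actually be found on its node's tx queue. A packet
 * found missing on two consecutive passes is considered stale and is
 * re-queued via emlxs_tx_put().
 */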
static void
emlxs_tx_watchdog(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t ringno;
	RING *rp;
	IOCBQ *next;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t found;
	MATCHMAP *bmp;
	Q abort;
	uint32_t iotag;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt = NULL;
	uint32_t cmd;
	uint32_t did;

	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_RINGTX_LOCK);

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_TXQ)) {
				nlp = sbp->node;
				iocbq = &sbp->iocbq;

				if (iocbq->flag & IOCB_PRIORITY) {
					iocbq = (IOCBQ *)
					    nlp->nlp_ptx[ringno].q_first;
				} else {
					iocbq = (IOCBQ *)
					    nlp->nlp_tx[ringno].q_first;
				}

				/* Find a matching entry */
				found = 0;
				while (iocbq) {
					if (iocbq == &sbp->iocbq) {
						found = 1;
						break;
					}
					iocbq = (IOCBQ *)iocbq->next;
				}

				if (!found) {
					if (!(sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags |= PACKET_STALE;
						mutex_exit(&sbp->mtx);
					} else {
						if (abort.q_first == 0) {
							abort.q_first =
							    (uint8_t *)
							    &sbp->iocbq;
						} else {
							((IOCBQ *)
							    abort.q_last)->
							    next = &sbp->iocbq;
						}
						abort.q_last = (uint8_t *)
						    &sbp->iocbq;
						abort.q_cnt++;
					}

				} else {
					if ((sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags &= ~PACKET_STALE;
						mutex_exit(&sbp->mtx);
					}
				}
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
	}

	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		iocbq->next = NULL;
		sbp = (emlxs_buf_t *)iocbq->sbp;

		pkt = PRIV2PKT(sbp);
		if (pkt) {
			did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_txq_watchdog_msg,
			    "sbp=%p node=%p cmd=%08x did=%x",
			    sbp, sbp->node, cmd, did);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_txq_watchdog_msg,
			    "sbp=%p node=%p",
			    sbp, sbp->node);
		}

		emlxs_tx_put(iocbq, 0);

		iocbq = next;

	}	/* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	return;

} /* emlxs_tx_watchdog() */

#endif	/* TX_WATCHDOG */


#ifdef DHCHAP_SUPPORT

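/*
 * Check each node for expired DHCHAP authentication-response and
 * reauthentication timers and invoke the corresponding timeout
 * handlers.
 */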
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp = NULL;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		ndlp = port->node_table[i];

		if (!ndlp) {
			continue;
		}
		/* Check authentication response timeout */
		if (ndlp->node_dhc.nlp_authrsp_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
			/* Trigger authrsp timeout handler */
			(void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
		}
		/* Check reauthentication timeout */
		if (ndlp->node_dhc.nlp_reauth_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
			/* Trigger reauth timeout handler */
			emlxs_dhc_reauth_timeout(port, NULL, ndlp);
		}
	}
	return;

} /* emlxs_timer_check_dhchap() */

#endif	/* DHCHAP_SUPPORT */