xref: /freebsd/sys/dev/ocs_fc/ocs_fabric.c (revision 95ee2897)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /**
33  * @file
34  *
35  * This file implements remote node state machines for:
36  * - Fabric logins.
37  * - Fabric controller events.
38  * - Name/directory services interaction.
39  * - Point-to-point logins.
40  */
41 
42 /*!
43 @defgroup fabric_sm Node State Machine: Fabric States
44 @defgroup ns_sm Node State Machine: Name/Directory Services States
45 @defgroup p2p_sm Node State Machine: Point-to-Point Node States
46 */
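
/*
 * Overview of the state flow implemented in this file:
 * - __ocs_fabric_init sends an FLOGI to the well-known fabric address; the
 *   FLOGI response selects either switched-fabric or point-to-point topology.
 * - Once the domain/sport attach completes, __ocs_fabric_wait_domain_attach
 *   starts the name services node (PLOGI, RFT_ID, RFF_ID, GID_PT) and, when
 *   RSCN handling is enabled, the fabric controller node (SCR, then RSCN
 *   forwarding to the name services node).
 * - In point-to-point mode, the winner uses the __ocs_p2p_* states to send a
 *   PLOGI to its peer; the loser shuts down its transient node and waits for
 *   the peer's PLOGI.
 */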
47 
48 #include "ocs.h"
49 #include "ocs_fabric.h"
50 #include "ocs_els.h"
51 #include "ocs_device.h"
52 
53 static void ocs_fabric_initiate_shutdown(ocs_node_t *node);
54 static void * __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
55 static int32_t ocs_start_ns_node(ocs_sport_t *sport);
56 static int32_t ocs_start_fabctl_node(ocs_sport_t *sport);
57 static int32_t ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len);
58 static void ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata);
59 static uint64_t ocs_get_wwpn(fc_plogi_payload_t *sp);
60 static void gidpt_delay_timer_cb(void *arg);
61 
62 /**
63  * @ingroup fabric_sm
64  * @brief Fabric node state machine: Initial state.
65  *
66  * @par Description
67  * Send an FLOGI to a well-known fabric.
68  *
69  * @param ctx Remote node sm context.
70  * @param evt Event to process.
71  * @param arg Per event optional argument.
72  *
73  * @return Returns NULL.
74  */
75 void *
76 __ocs_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
77 {
78 	std_node_state_decl();
79 
80 	node_sm_trace();
81 
82 	switch(evt) {
83 	case OCS_EVT_REENTER:	/* not sure why we're getting these ... */
84 		ocs_log_debug(node->ocs, ">>> reenter !!\n");
85 		/* fall through */
86 	case OCS_EVT_ENTER:
87 		/* sm: / send FLOGI */
88 		ocs_send_flogi(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
89 		ocs_node_transition(node, __ocs_fabric_flogi_wait_rsp, NULL);
90 		break;
91 
92 	default:
93 		__ocs_fabric_common(__func__, ctx, evt, arg);
94 		break;
95 	}
96 
97 	return NULL;
98 }
99 
100 /**
101  * @ingroup fabric_sm
102  * @brief Set sport topology.
103  *
104  * @par Description
105  * Set sport topology.
106  *
107  * @param node Pointer to the node for which the topology is set.
108  * @param topology Topology to set.
109  *
110  * @return Returns NULL.
111  */
112 void
113 ocs_fabric_set_topology(ocs_node_t *node, ocs_sport_topology_e topology)
114 {
115 	node->sport->topology = topology;
116 }
117 
118 /**
119  * @ingroup fabric_sm
120  * @brief Notify sport topology.
121  * @par Description
122  * Notify the other nodes in the sport of the discovered topology.
123  * @param node Pointer to the node for which the topology is set.
124  * @return None.
125  */
126 void
127 ocs_fabric_notify_topology(ocs_node_t *node)
128 {
129 	ocs_node_t *tmp_node;
130 	ocs_node_t *next;
131 	ocs_sport_topology_e topology = node->sport->topology;
132 
133 	/* now loop through the nodes in the sport and send topology notification */
134 	ocs_sport_lock(node->sport);
135 	ocs_list_foreach_safe(&node->sport->node_list, tmp_node, next) {
136 		if (tmp_node != node) {
137 			ocs_node_post_event(tmp_node, OCS_EVT_SPORT_TOPOLOGY_NOTIFY, (void *)topology);
138 		}
139 	}
140 	ocs_sport_unlock(node->sport);
141 }
142 
143 /**
144  * @ingroup fabric_sm
145  * @brief Fabric node state machine: Wait for an FLOGI response.
146  *
147  * @par Description
148  * Wait for an FLOGI response event.
149  *
150  * @param ctx Remote node state machine context.
151  * @param evt Event to process.
152  * @param arg Per event optional argument.
153  *
154  * @return Returns NULL.
155  */
156 
157 void *
158 __ocs_fabric_flogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
159 {
160 	ocs_node_cb_t *cbdata = arg;
161 	std_node_state_decl();
162 
163 	node_sm_trace();
164 
165 	switch(evt) {
166 	case OCS_EVT_SRRS_ELS_REQ_OK: {
167 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
168 			return NULL;
169 		}
170 		ocs_assert(node->els_req_cnt, NULL);
171 		node->els_req_cnt--;
172 
173 		ocs_domain_save_sparms(node->sport->domain, cbdata->els->els_rsp.virt);
174 
175 		ocs_display_sparams(node->display_name, "flogi rcvd resp", 0, NULL,
176 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
177 
178 		/* Check to see if the fabric is an F_PORT or an N_PORT */
179 		if (ocs_rnode_is_nport(cbdata->els->els_rsp.virt)) {
180 			/* sm: if nport and p2p_winner / ocs_domain_attach */
181 			ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
182 			if (ocs_p2p_setup(node->sport)) {
183 				node_printf(node, "p2p setup failed, shutting down node\n");
184 				node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
185 				ocs_fabric_initiate_shutdown(node);
186 			} else {
187 				if (node->sport->p2p_winner) {
188 					ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
189 					if (!node->sport->domain->attached) {
190 						node_printf(node, "p2p winner, domain not attached\n");
191 						ocs_domain_attach(node->sport->domain, node->sport->p2p_port_id);
192 					} else {
193 						/* already attached, just send ATTACH_OK */
194 						node_printf(node, "p2p winner, domain already attached\n");
195 						ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
196 					}
197 				} else {
198 					/* peer is p2p winner; PLOGI will be received on the
199 					 * remote SID=1 node; this node has served its purpose
200 					 */
201 					node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
202 					ocs_fabric_initiate_shutdown(node);
203 				}
204 			}
205 		} else {
206 			/* sm: if not nport / ocs_domain_attach */
207 			/* ext_status has the fc_id, attach domain */
208 			ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_FABRIC);
209 			ocs_fabric_notify_topology(node);
210 			ocs_assert(!node->sport->domain->attached, NULL);
211 			ocs_domain_attach(node->sport->domain, cbdata->ext_status);
212 			ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
213 		}
214 
215 		break;
216 	}
217 
218 	case OCS_EVT_ELS_REQ_ABORTED:
219 	case OCS_EVT_SRRS_ELS_REQ_RJT:
220 	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
221 		ocs_sport_t *sport = node->sport;
222 		/*
223 		 * with these errors, we have no recovery; shut down the sport, leaving the link
224 		 * up and the domain ready
225 		 */
226 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
227 			return NULL;
228 		}
229 		ocs_assert(node->els_req_cnt, NULL);
230 		node->els_req_cnt--;
231 
232 		if (node->sport->topology == OCS_SPORT_TOPOLOGY_P2P && !node->sport->p2p_winner) {
233 			node_printf(node, "FLOGI failed, peer p2p winner, shutdown node\n");
234 			node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
235 			ocs_fabric_initiate_shutdown(node);
236 			break;
237 		}
238 
239 		node_printf(node, "FLOGI failed evt=%s, shutting down sport [%s]\n", ocs_sm_event_name(evt),
240 			sport->display_name);
241 		ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
242 		break;
243 	}
244 
245 	default:
246 		__ocs_fabric_common(__func__, ctx, evt, arg);
247 		break;
248 	}
249 
250 	return NULL;
251 }
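
/*
 * Summary of the FLOGI response handling above: if the responder reports
 * itself as an F_PORT, the topology is a switched fabric and the FC_ID
 * assigned to this port arrives in the completion's ext_status, which is used
 * to attach the domain. If the responder is an N_PORT, the link is
 * point-to-point; when this side is the p2p winner it attaches the domain,
 * otherwise this transient node shuts down and waits for the peer's PLOGI.
 */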
252 
253 /**
254  * @ingroup fabric_sm
255  * @brief Fabric node state machine: Initial state for a virtual port.
256  *
257  * @par Description
258  * State entered when a virtual port is created. Send FDISC.
259  *
260  * @param ctx Remote node state machine context.
261  * @param evt Event to process.
262  * @param arg Per event optional argument.
263  *
264  * @return Returns NULL.
265  */
266 void *
267 __ocs_vport_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
268 {
269 	std_node_state_decl();
270 
271 	node_sm_trace();
272 
273 	switch(evt) {
274 	case OCS_EVT_ENTER:
275 		/* sm: send FDISC */
276 		ocs_send_fdisc(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
277 		ocs_node_transition(node, __ocs_fabric_fdisc_wait_rsp, NULL);
278 		break;
279 
280 	default:
281 		__ocs_fabric_common(__func__, ctx, evt, arg);
282 		break;
283 	}
284 
285 	return NULL;
286 }
287 
288 /**
289  * @ingroup fabric_sm
290  * @brief Fabric node state machine: Wait for an FDISC response
291  *
292  * @par Description
293  * Used for a virtual port. Waits for an FDISC response. If OK, issue a HW port attach.
294  *
295  * @param ctx Remote node state machine context.
296  * @param evt Event to process.
297  * @param arg Per event optional argument.
298  *
299  * @return Returns NULL.
300  */
301 void *
302 __ocs_fabric_fdisc_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
303 {
304 	ocs_node_cb_t *cbdata = arg;
305 	std_node_state_decl();
306 
307 	node_sm_trace();
308 
309 	switch(evt) {
310 	case OCS_EVT_SRRS_ELS_REQ_OK: {
311 		/* fc_id is in ext_status */
312 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
313 			return NULL;
314 		}
315 
316 		ocs_display_sparams(node->display_name, "fdisc rcvd resp", 0, NULL,
317 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
318 
319 		ocs_assert(node->els_req_cnt, NULL);
320 		node->els_req_cnt--;
321 		/* sm: ocs_sport_attach */
322 		ocs_sport_attach(node->sport, cbdata->ext_status);
323 		ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
324 		break;
325 	}
326 
327 	case OCS_EVT_SRRS_ELS_REQ_RJT:
328 	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
329 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
330 			return NULL;
331 		}
332 		ocs_assert(node->els_req_cnt, NULL);
333 		node->els_req_cnt--;
334 		ocs_log_err(ocs, "FDISC failed, shutting down sport\n");
335 		/* sm: shutdown sport */
336 		ocs_sm_post_event(&node->sport->sm, OCS_EVT_SHUTDOWN, NULL);
337 		break;
338 	}
339 
340 	default:
341 		__ocs_fabric_common(__func__, ctx, evt, arg);
342 		break;
343 	}
344 
345 	return NULL;
346 }
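
/*
 * Note: FDISC is the login used here for virtual (NPIV) ports. As with FLOGI,
 * the assigned FC_ID is returned in the completion's ext_status and is passed
 * to ocs_sport_attach() to attach the virtual port.
 */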
347 
348 /**
349  * @ingroup fabric_sm
350  * @brief Fabric node state machine: Wait for a domain/sport attach event.
351  *
352  * @par Description
353  * Waits for a domain/sport attach event.
354  *
355  * @param ctx Remote node state machine context.
356  * @param evt Event to process.
357  * @param arg Per event optional argument.
358  *
359  * @return Returns NULL.
360  */
361 void *
362 __ocs_fabric_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
363 {
364 	std_node_state_decl();
365 
366 	node_sm_trace();
367 
368 	switch(evt) {
369 	case OCS_EVT_ENTER:
370 		ocs_node_hold_frames(node);
371 		break;
372 
373 	case OCS_EVT_EXIT:
374 		ocs_node_accept_frames(node);
375 		break;
376 	case OCS_EVT_DOMAIN_ATTACH_OK:
377 	case OCS_EVT_SPORT_ATTACH_OK: {
378 		int rc;
379 
380 		rc = ocs_start_ns_node(node->sport);
381 		if (rc)
382 			return NULL;
383 
384 		/* sm: if enable_ini / start fabctl node
385 		 * Instantiate the fabric controller (sends SCR) */
386 		if (node->sport->enable_rscn) {
387 			rc = ocs_start_fabctl_node(node->sport);
388 			if (rc)
389 				return NULL;
390 		}
391 		ocs_node_transition(node, __ocs_fabric_idle, NULL);
392 		break;
393 	}
394 	default:
395 		__ocs_fabric_common(__func__, ctx, evt, arg);
396 		return NULL;
397 	}
398 
399 	return NULL;
400 }
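
/*
 * Once the domain or sport attach completes, discovery services are started:
 * ocs_start_ns_node() brings up the directory/name server node (well-known
 * address FC_ADDR_NAMESERVER) and, when RSCN handling is enabled,
 * ocs_start_fabctl_node() brings up the fabric controller node
 * (FC_ADDR_CONTROLLER), which registers for state change notifications.
 */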
401 
402 /**
403  * @ingroup fabric_sm
404  * @brief Fabric node state machine: Fabric node is idle.
405  *
406  * @par Description
407  * Wait for fabric node events.
408  *
409  * @param ctx Remote node state machine context.
410  * @param evt Event to process.
411  * @param arg Per event optional argument.
412  *
413  * @return Returns NULL.
414  */
415 void *
416 __ocs_fabric_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
417 {
418 	std_node_state_decl();
419 
420 	node_sm_trace();
421 
422 	switch(evt) {
423 	case OCS_EVT_DOMAIN_ATTACH_OK:
424 		break;
425 	default:
426 		__ocs_fabric_common(__func__, ctx, evt, arg);
427 		return NULL;
428 	}
429 
430 	return NULL;
431 }
432 
433 /**
434  * @ingroup ns_sm
435  * @brief Name services node state machine: Initialize.
436  *
437  * @par Description
438  * A PLOGI is sent to the well-known name/directory services node.
439  *
440  * @param ctx Remote node state machine context.
441  * @param evt Event to process.
442  * @param arg Per event optional argument.
443  *
444  * @return Returns NULL.
445  */
446 void *
447 __ocs_ns_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
448 {
449 	std_node_state_decl();
450 
451 	node_sm_trace();
452 
453 	switch(evt) {
454 	case OCS_EVT_ENTER:
455 		/* sm: send PLOGI */
456 		ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
457 		ocs_node_transition(node, __ocs_ns_plogi_wait_rsp, NULL);
458 		break;
459 	default:
460 		__ocs_fabric_common(__func__, ctx, evt, arg);
461 		break;
462 	}
463 
464 	return NULL;
465 }
466 
467 /**
468  * @ingroup ns_sm
469  * @brief Name services node state machine: Wait for a PLOGI response.
470  *
471  * @par Description
472  * Waits for a response from PLOGI to name services node, then issues a
473  * node attach request to the HW.
474  *
475  * @param ctx Remote node state machine context.
476  * @param evt Event to process.
477  * @param arg Per event optional argument.
478  *
479  * @return Returns NULL.
480  */
481 void *
482 __ocs_ns_plogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
483 {
484 	int32_t rc;
485 	ocs_node_cb_t *cbdata = arg;
486 	std_node_state_decl();
487 
488 	node_sm_trace();
489 
490 	switch(evt) {
491 	case OCS_EVT_SRRS_ELS_REQ_OK: {
492 		/* Save service parameters */
493 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
494 			return NULL;
495 		}
496 		ocs_assert(node->els_req_cnt, NULL);
497 		node->els_req_cnt--;
498 		/* sm: save sparams, ocs_node_attach */
499 		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
500 		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
501 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
502 		rc = ocs_node_attach(node);
503 		ocs_node_transition(node, __ocs_ns_wait_node_attach, NULL);
504 		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
505 			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
506 		}
507 		break;
508 	}
509 	default:
510 		__ocs_fabric_common(__func__, ctx, evt, arg);
511 		return NULL;
512 	}
513 
514 	return NULL;
515 }
516 
517 /**
518  * @ingroup ns_sm
519  * @brief Name services node state machine: Wait for a node attach completion.
520  *
521  * @par Description
522  * Waits for a node attach completion, then issues an RFTID name services
523  * request.
524  *
525  * @param ctx Remote node state machine context.
526  * @param evt Event to process.
527  * @param arg Per event optional argument.
528  *
529  * @return Returns NULL.
530  */
531 void *
532 __ocs_ns_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
533 {
534 	std_node_state_decl();
535 
536 	node_sm_trace();
537 
538 	switch(evt) {
539 	case OCS_EVT_ENTER:
540 		ocs_node_hold_frames(node);
541 		break;
542 
543 	case OCS_EVT_EXIT:
544 		ocs_node_accept_frames(node);
545 		break;
546 
547 	case OCS_EVT_NODE_ATTACH_OK:
548 		node->attached = TRUE;
549 		/* sm: send RFTID */
550 		ocs_ns_send_rftid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
551 				 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
552 		ocs_node_transition(node, __ocs_ns_rftid_wait_rsp, NULL);
553 		break;
554 
555 	case OCS_EVT_NODE_ATTACH_FAIL:
556 		/* node attach failed, shutdown the node */
557 		node->attached = FALSE;
558 		node_printf(node, "Node attach failed\n");
559 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
560 		ocs_fabric_initiate_shutdown(node);
561 		break;
562 
563 	case OCS_EVT_SHUTDOWN:
564 		node_printf(node, "Shutdown event received\n");
565 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
566 		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
567 		break;
568 
569 	/* if an RSCN is received, just ignore it;
570 	 * we haven't sent GID_PT yet (the ACC is sent by the fabctl node) */
571 	case OCS_EVT_RSCN_RCVD:
572 		break;
573 
574 	default:
575 		__ocs_fabric_common(__func__, ctx, evt, arg);
576 		return NULL;
577 	}
578 
579 	return NULL;
580 }
581 
582 /**
583  * @ingroup ns_sm
584  * @brief Wait for a domain/sport/node attach completion, then
585  * shutdown.
586  *
587  * @par Description
588  * Waits for a domain/sport/node attach completion, then shuts
589  * node down.
590  *
591  * @param ctx Remote node state machine context.
592  * @param evt Event to process.
593  * @param arg Per event optional argument.
594  *
595  * @return Returns NULL.
596  */
597 void *
598 __ocs_fabric_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
599 {
600 	std_node_state_decl();
601 
602 	node_sm_trace();
603 
604 	switch(evt) {
605 	case OCS_EVT_ENTER:
606 		ocs_node_hold_frames(node);
607 		break;
608 
609 	case OCS_EVT_EXIT:
610 		ocs_node_accept_frames(node);
611 		break;
612 
613 	/* wait for any of these attach events and then shutdown */
614 	case OCS_EVT_NODE_ATTACH_OK:
615 		node->attached = TRUE;
616 		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
617 		ocs_fabric_initiate_shutdown(node);
618 		break;
619 
620 	case OCS_EVT_NODE_ATTACH_FAIL:
621 		node->attached = FALSE;
622 		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
623 		ocs_fabric_initiate_shutdown(node);
624 		break;
625 
626 	/* ignore shutdown event as we're already in shutdown path */
627 	case OCS_EVT_SHUTDOWN:
628 		node_printf(node, "Shutdown event received\n");
629 		break;
630 
631 	default:
632 		__ocs_fabric_common(__func__, ctx, evt, arg);
633 		return NULL;
634 	}
635 
636 	return NULL;
637 }
638 
639 /**
640  * @ingroup ns_sm
641  * @brief Name services node state machine: Wait for an RFTID response event.
642  *
643  * @par Description
644  * Waits for an RFTID response event; on success, an RFFID name services
645  * request is issued.
646  *
647  * @param ctx Remote node state machine context.
648  * @param evt Event to process.
649  * @param arg Per event optional argument.
650  *
651  * @return Returns NULL.
652  */
653 void *
654 __ocs_ns_rftid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
655 {
656 	std_node_state_decl();
657 
658 	node_sm_trace();
659 
660 	switch(evt) {
661 	case OCS_EVT_SRRS_ELS_REQ_OK:
662 		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFT_ID, __ocs_fabric_common, __func__)) {
663 			return NULL;
664 		}
665 		ocs_assert(node->els_req_cnt, NULL);
666 		node->els_req_cnt--;
667 		/*sm: send RFFID */
668 		ocs_ns_send_rffid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
669 				OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
670 		ocs_node_transition(node, __ocs_ns_rffid_wait_rsp, NULL);
671 		break;
672 
673 	/* if an RSCN is received, just ignore it;
674 	 * we haven't sent GID_PT yet (the ACC is sent by the fabctl node) */
675 	case OCS_EVT_RSCN_RCVD:
676 		break;
677 
678 	default:
679 		__ocs_fabric_common(__func__, ctx, evt, arg);
680 		return NULL;
681 	}
682 
683 	return NULL;
684 }
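
/*
 * Background (per FC-GS): RFT_ID registers the FC-4 types supported by this
 * port (e.g. FCP) with the name server, and the RFF_ID sent above registers
 * the FC-4 features (initiator and/or target roles) for that type.
 */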
685 
686 /**
687  * @ingroup ns_sm
688  * @brief Fabric node state machine: Wait for RFFID response event.
689  *
690  * @par Description
691  * Waits for an RFFID response event; if configured for an initiator operation,
692  * a GIDPT name services request is issued.
693  *
694  * @param ctx Remote node state machine context.
695  * @param evt Event to process.
696  * @param arg Per event optional argument.
697  *
698  * @return Returns NULL.
699  */
700 void *
701 __ocs_ns_rffid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
702 {
703 	std_node_state_decl();
704 
705 	node_sm_trace();
706 
707 	switch(evt) {
708 	case OCS_EVT_SRRS_ELS_REQ_OK:	{
709 		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFF_ID, __ocs_fabric_common, __func__)) {
710 			return NULL;
711 		}
712 		ocs_assert(node->els_req_cnt, NULL);
713 		node->els_req_cnt--;
714 		if (node->sport->enable_rscn) {
715 			/* sm: if enable_rscn / send GIDPT */
716 			ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
717 					OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
718 			ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
719 		} else {
720 			/* if 'T' only, we're done, go to idle */
721 			ocs_node_transition(node, __ocs_ns_idle, NULL);
722 		}
723 		break;
724 	}
725 	/* if an RSCN is received, just ignore it;
726 	 * we haven't sent GID_PT yet (the ACC is sent by the fabctl node) */
727 	case OCS_EVT_RSCN_RCVD:
728 		break;
729 
730 	default:
731 		__ocs_fabric_common(__func__, ctx, evt, arg);
732 		return NULL;
733 	}
734 
735 	return NULL;
736 }
737 
738 /**
739  * @ingroup ns_sm
740  * @brief Name services node state machine: Wait for a GIDPT response.
741  *
742  * @par Description
743  * Wait for a GIDPT response from the name server. Process the FC_IDs that are
744  * reported by creating new remote ports, as needed.
745  *
746  * @param ctx Remote node state machine context.
747  * @param evt Event to process.
748  * @param arg Per event optional argument.
749  *
750  * @return Returns NULL.
751  */
752 void *
753 __ocs_ns_gidpt_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
754 {
755 	ocs_node_cb_t *cbdata = arg;
756 	std_node_state_decl();
757 
758 	node_sm_trace();
759 
760 	switch(evt) {
761 	case OCS_EVT_SRRS_ELS_REQ_OK:	{
762 		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_GID_PT, __ocs_fabric_common, __func__)) {
763 			return NULL;
764 		}
765 		ocs_assert(node->els_req_cnt, NULL);
766 		node->els_req_cnt--;
767 		/* sm: / process GIDPT payload */
768 		ocs_process_gidpt_payload(node, cbdata->els->els_rsp.virt, cbdata->els->els_rsp.len);
769 		/* TODO: should we logout at this point or just go idle */
770 		ocs_node_transition(node, __ocs_ns_idle, NULL);
771 		break;
772 	}
773 
774 	case OCS_EVT_SRRS_ELS_REQ_FAIL:	{
775 		/* not much we can do; will retry with the next RSCN */
776 		node_printf(node, "GID_PT failed to complete\n");
777 		ocs_assert(node->els_req_cnt, NULL);
778 		node->els_req_cnt--;
779 		ocs_node_transition(node, __ocs_ns_idle, NULL);
780 		break;
781 	}
782 
783 	/* if receive RSCN here, queue up another discovery processing */
784 	case OCS_EVT_RSCN_RCVD: {
785 		node_printf(node, "RSCN received during GID_PT processing\n");
786 		node->rscn_pending = 1;
787 		break;
788 	}
789 
790 	default:
791 		__ocs_fabric_common(__func__, ctx, evt, arg);
792 		return NULL;
793 	}
794 
795 	return NULL;
796 }
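
/*
 * GID_PT ("Get Port Identifiers by port type") asks the name server for the
 * FC_IDs of all registered ports of a given type; the accept payload is
 * walked by ocs_process_gidpt_payload() below, which creates new remote
 * nodes as needed.
 */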
797 
798 /**
799  * @ingroup ns_sm
800  * @brief Name services node state machine: Idle state.
801  *
802  * @par Description
803  * Idle. Waits for RSCN received events (posted from the fabric controller) and
804  * restarts the GIDPT name services query and processing.
805  *
806  * @param ctx Remote node state machine context.
807  * @param evt Event to process.
808  * @param arg Per event optional argument.
809  *
810  * @return Returns NULL.
811  */
812 void *
813 __ocs_ns_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
814 {
815 	std_node_state_decl();
816 
817 	node_sm_trace();
818 
819 	switch(evt) {
820 	case OCS_EVT_ENTER:
821 		if (!node->rscn_pending) {
822 			break;
823 		}
824 		node_printf(node, "RSCN pending, restart discovery\n");
825 		node->rscn_pending = 0;
826 
827 			/* fall through */
828 
829 	case OCS_EVT_RSCN_RCVD: {
830 		/* sm: / send GIDPT
831 		 * If target RSCN processing is enabled, and this is target only
832 		 * (not initiator), and tgt_rscn_delay is non-zero,
833 		 * then we delay issuing the GID_PT
834 		 */
835 		if ((ocs->tgt_rscn_delay_msec != 0) && !node->sport->enable_ini && node->sport->enable_tgt &&
836 			enable_target_rscn(ocs)) {
837 			ocs_node_transition(node, __ocs_ns_gidpt_delay, NULL);
838 		} else {
839 			ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
840 					OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
841 			ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
842 		}
843 		break;
844 	}
845 
846 	default:
847 		__ocs_fabric_common(__func__, ctx, evt, arg);
848 		break;
849 	}
850 
851 	return NULL;
852 }
853 
854 /**
855  * @brief Handle GIDPT delay timer callback
856  *
857  * @par Description
858  * Post an OCS_EVT_GIDPT_DELAY_EXPIRED event to the passed-in node.
859  *
860  * @param arg Pointer to node.
861  *
862  * @return None.
863  */
864 static void
865 gidpt_delay_timer_cb(void *arg)
866 {
867 	ocs_node_t *node = arg;
868 	int32_t rc;
869 
870 	ocs_del_timer(&node->gidpt_delay_timer);
871 	rc = ocs_xport_control(node->ocs->xport, OCS_XPORT_POST_NODE_EVENT, node, OCS_EVT_GIDPT_DELAY_EXPIRED, NULL);
872 	if (rc) {
873 		ocs_log_err(node->ocs, "ocs_xport_control(OCS_XPORT_POST_NODE_EVENT) failed: %d\n", rc);
874 	}
875 }
876 
877 /**
878  * @ingroup ns_sm
879  * @brief Name services node state machine: Delayed GIDPT.
880  *
881  * @par Description
882  * Waits for the GIDPT delay to expire before submitting a GIDPT to the name server.
883  *
884  * @param ctx Remote node state machine context.
885  * @param evt Event to process.
886  * @param arg Per event optional argument.
887  *
888  * @return Returns NULL.
889  */
890 void *
891 __ocs_ns_gidpt_delay(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
892 {
893 	std_node_state_decl();
894 
895 	node_sm_trace();
896 
897 	switch(evt) {
898 	case OCS_EVT_ENTER: {
899 		time_t delay_msec;
900 
901 		ocs_assert(ocs->tgt_rscn_delay_msec != 0, NULL);
902 
903 		/*
904 		 * Compute the delay time. Use tgt_rscn_delay by default; if the time since
905 		 * the last GIDPT is less than tgt_rscn_period, use tgt_rscn_period instead.
906 		 */
907 		delay_msec = ocs->tgt_rscn_delay_msec;
908 		if ((ocs_msectime() - node->time_last_gidpt_msec) < ocs->tgt_rscn_period_msec) {
909 			delay_msec = ocs->tgt_rscn_period_msec;
910 		}
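
		/*
		 * Example with hypothetical settings: tgt_rscn_delay_msec = 2000 and
		 * tgt_rscn_period_msec = 10000. If the previous GID_PT was issued
		 * 3000 ms ago (less than the period), delay_msec becomes 10000;
		 * if it was issued 15000 ms ago, the default 2000 ms delay is used.
		 */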
911 
912 		ocs_setup_timer(ocs, &node->gidpt_delay_timer, gidpt_delay_timer_cb, node, delay_msec);
913 
914 		break;
915 	}
916 
917 	case OCS_EVT_GIDPT_DELAY_EXPIRED:
918 		node->time_last_gidpt_msec = ocs_msectime();
919 		ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
920 				OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
921 		ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
922 		break;
923 
924 	case OCS_EVT_RSCN_RCVD: {
925 		ocs_log_debug(ocs, "RSCN received while in GIDPT delay - no action\n");
926 		break;
927 	}
928 
929 	default:
930 		__ocs_fabric_common(__func__, ctx, evt, arg);
931 		break;
932 	}
933 
934 	return NULL;
935 }
936 
937 /**
938  * @ingroup fabric_sm
939  * @brief Fabric controller node state machine: Initial state.
940  *
941  * @par Description
942  * Issue an SCR to the well-known fabric controller address; no PLOGI is required.
943  *
944  * @param ctx Remote node state machine context.
945  * @param evt Event to process.
946  * @param arg Per event optional argument.
947  *
948  * @return Returns NULL.
949  */
950 void *
951 __ocs_fabctl_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
952 {
953 	ocs_node_t *node = ctx->app;
954 
955 	node_sm_trace();
956 
957 	switch(evt) {
958 	case OCS_EVT_ENTER:
959 		/* no need to login to fabric controller, just send SCR */
960 		ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
961 		ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
962 		break;
963 
964 	case OCS_EVT_NODE_ATTACH_OK:
965 		node->attached = TRUE;
966 		break;
967 
968 	default:
969 		__ocs_fabric_common(__func__, ctx, evt, arg);
970 		return NULL;
971 	}
972 
973 	return NULL;
974 }
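
/*
 * SCR (State Change Registration) asks the fabric controller (well-known
 * address FC_ADDR_CONTROLLER) to deliver RSCNs for fabric events; since no
 * explicit login to the fabric controller is needed, this state sends the
 * SCR directly on entry.
 */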
975 
976 /**
977  * @ingroup fabric_sm
978  * @brief Fabric controller node state machine: Wait for a node attach request
979  * to complete.
980  *
981  * @par Description
982  * Wait for a node attach to complete. If successful, issue an SCR
983  * to the fabric controller, subscribing to all RSCN events.
984  *
985  * @param ctx Remote node state machine context.
986  * @param evt Event to process.
987  * @param arg Per event optional argument.
988  *
989  * @return Returns NULL.
990  *
991  */
992 void *
993 __ocs_fabctl_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
994 {
995 	std_node_state_decl();
996 
997 	node_sm_trace();
998 
999 	switch(evt) {
1000 	case OCS_EVT_ENTER:
1001 		ocs_node_hold_frames(node);
1002 		break;
1003 
1004 	case OCS_EVT_EXIT:
1005 		ocs_node_accept_frames(node);
1006 		break;
1007 
1008 	case OCS_EVT_NODE_ATTACH_OK:
1009 		node->attached = TRUE;
1010 		/* sm: / send SCR */
1011 		ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1012 		ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
1013 		break;
1014 
1015 	case OCS_EVT_NODE_ATTACH_FAIL:
1016 		/* node attach failed, shutdown the node */
1017 		node->attached = FALSE;
1018 		node_printf(node, "Node attach failed\n");
1019 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1020 		ocs_fabric_initiate_shutdown(node);
1021 		break;
1022 
1023 	case OCS_EVT_SHUTDOWN:
1024 		node_printf(node, "Shutdown event received\n");
1025 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1026 		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1027 		break;
1028 
1029 	default:
1030 		__ocs_fabric_common(__func__, ctx, evt, arg);
1031 		return NULL;
1032 	}
1033 
1034 	return NULL;
1035 }
1036 
1037 /**
1038  * @ingroup fabric_sm
1039  * @brief Fabric controller node state machine: Wait for an SCR response from the
1040  * fabric controller.
1041  *
1042  * @par Description
1043  * Waits for an SCR response from the fabric controller.
1044  *
1045  * @param ctx Remote node state machine context.
1046  * @param evt Event to process.
1047  * @param arg Per event optional argument.
1048  *
1049  * @return Returns NULL.
1050  */
1051 void *
1052 __ocs_fabctl_wait_scr_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1053 {
1054 	std_node_state_decl();
1055 
1056 	node_sm_trace();
1057 
1058 	switch(evt) {
1059 	case OCS_EVT_SRRS_ELS_REQ_OK:
1060 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_SCR, __ocs_fabric_common, __func__)) {
1061 			return NULL;
1062 		}
1063 		ocs_assert(node->els_req_cnt, NULL);
1064 		node->els_req_cnt--;
1065 		ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1066 		break;
1067 
1068 	default:
1069 		__ocs_fabric_common(__func__, ctx, evt, arg);
1070 		return NULL;
1071 	}
1072 
1073 	return NULL;
1074 }
1075 
1076 /**
1077  * @ingroup fabric_sm
1078  * @brief Fabric controller node state machine: Ready.
1079  *
1080  * @par Description
1081  * In this state, the fabric controller sends an RSCN, which this node
1082  * receives and forwards to the name services node object; an LS_ACC for
1083  * the RSCN is then sent.
1084  *
1085  * @param ctx Remote node state machine context.
1086  * @param evt Event to process.
1087  * @param arg Per event optional argument.
1088  *
1089  * @return Returns NULL.
1090  */
1091 
1092 void *
1093 __ocs_fabctl_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1094 {
1095 	ocs_node_cb_t *cbdata = arg;
1096 	std_node_state_decl();
1097 
1098 	node_sm_trace();
1099 
1100 	switch(evt) {
1101 	case OCS_EVT_RSCN_RCVD: {
1102 		fc_header_t *hdr = cbdata->header->dma.virt;
1103 
1104 		/* sm: / process RSCN (forward to name services node),
1105 		 * send LS_ACC */
1106 		ocs_process_rscn(node, cbdata);
1107 		ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1108 		ocs_node_transition(node, __ocs_fabctl_wait_ls_acc_cmpl, NULL);
1109 		break;
1110 	}
1111 
1112 	default:
1113 		__ocs_fabric_common(__func__, ctx, evt, arg);
1114 		return NULL;
1115 	}
1116 
1117 	return NULL;
1118 }
1119 
1120 /**
1121  * @ingroup fabric_sm
1122  * @brief Fabric controller node state machine: Wait for LS_ACC.
1123  *
1124  * @par Description
1125  * Waits for the LS_ACC (sent in response to the RSCN) to complete.
1126  *
1127  * @param ctx Remote node state machine context.
1128  * @param evt Event to process.
1129  * @param arg Per event optional argument.
1130  *
1131  * @return Returns NULL.
1132  */
1133 
1134 void *
1135 __ocs_fabctl_wait_ls_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1136 {
1137 	std_node_state_decl();
1138 
1139 	node_sm_trace();
1140 
1141 	switch(evt) {
1142 	case OCS_EVT_ENTER:
1143 		ocs_node_hold_frames(node);
1144 		break;
1145 
1146 	case OCS_EVT_EXIT:
1147 		ocs_node_accept_frames(node);
1148 		break;
1149 
1150 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1151 		ocs_assert(node->els_cmpl_cnt, NULL);
1152 		node->els_cmpl_cnt--;
1153 		ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1154 		break;
1155 
1156 	default:
1157 		__ocs_fabric_common(__func__, ctx, evt, arg);
1158 		return NULL;
1159 	}
1160 
1161 	return NULL;
1162 }
1163 
1164 /**
1165  * @ingroup fabric_sm
1166  * @brief Initiate fabric node shutdown.
1167  *
1168  * @param node Node for which shutdown is initiated.
1169  *
1170  * @return Returns None.
1171  */
1172 
1173 static void
1174 ocs_fabric_initiate_shutdown(ocs_node_t *node)
1175 {
1176 	ocs_hw_rtn_e rc;
1177 	ocs_t *ocs = node->ocs;
1178 	ocs_scsi_io_alloc_disable(node);
1179 
1180 	if (node->attached) {
1181 		/* issue hw node free; don't care if succeeds right away
1182 		 * or sometime later, will check node->attached later in
1183 		 * shutdown process
1184 		 */
1185 		rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
1186 		if (node->rnode.free_group) {
1187 			ocs_remote_node_group_free(node->node_group);
1188 			node->node_group = NULL;
1189 			node->rnode.free_group = FALSE;
1190 		}
1191 		if (rc != OCS_HW_RTN_SUCCESS && rc != OCS_HW_RTN_SUCCESS_SYNC) {
1192 			node_printf(node, "Failed freeing HW node, rc=%d\n", rc);
1193 		}
1194 	}
1195 	/*
1196 	 * node has either been detached or is in the process of being detached,
1197 	 * call common node's initiate cleanup function
1198 	 */
1199 	ocs_node_initiate_cleanup(node);
1200 }
1201 
1202 /**
1203  * @ingroup fabric_sm
1204  * @brief Fabric node state machine: Handle the common fabric node events.
1205  *
1206  * @param funcname Function name text.
1207  * @param ctx Remote node state machine context.
1208  * @param evt Event to process.
1209  * @param arg Per event optional argument.
1210  *
1211  * @return Returns NULL.
1212  */
1213 
1214 static void *
1215 __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1216 {
1217 	ocs_node_t *node = NULL;
1218 	ocs_assert(ctx, NULL);
1219 	ocs_assert(ctx->app, NULL);
1220 	node = ctx->app;
1221 
1222 	switch(evt) {
1223 	case OCS_EVT_DOMAIN_ATTACH_OK:
1224 		break;
1225 	case OCS_EVT_SHUTDOWN:
1226 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1227 		ocs_fabric_initiate_shutdown(node);
1228 		break;
1229 
1230 	default:
1231 		/* call default event handler common to all nodes */
1232 		__ocs_node_common(funcname, ctx, evt, arg);
1233 		break;
1234 	}
1235 	return NULL;
1236 }
1237 
1238 /**
1239  * @brief Return TRUE if the remote node is an NPORT.
1240  *
1241  * @par Description
1242  * Examines the service parameters. Returns TRUE if the node reports itself as
1243  * an NPORT.
1244  *
1245  * @param remote_sparms Remote node service parameters.
1246  *
1247  * @return Returns TRUE if NPORT.
1248  */
1249 
1250 int32_t
1251 ocs_rnode_is_nport(fc_plogi_payload_t *remote_sparms)
1252 {
1253 	return (ocs_be32toh(remote_sparms->common_service_parameters[1]) & (1U << 28)) == 0;
1254 }
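
/*
 * The check above relies on word 1, bit 28 of the common service parameters,
 * which (per FC-LS) is set by a fabric F_PORT in its FLOGI LS_ACC and clear
 * when the responder is an N_PORT, i.e. a direct point-to-point peer.
 */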
1255 
1256 /**
1257  * @brief Return the node's WWPN as a uint64_t.
1258  *
1259  * @par Description
1260  * The WWPN is computed from service parameters, and returned as a uint64_t.
1261  *
1262  * @param sp Pointer to service parameters.
1263  *
1264  * @return Returns WWPN.
1265  *
1266  */
1267 
1268 static uint64_t
1269 ocs_get_wwpn(fc_plogi_payload_t *sp)
1270 {
1271 	return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
1272 }
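
/*
 * For illustration with made-up values: after byte swapping, port_name_hi =
 * 0x20000090 and port_name_lo = 0xfa012345 combine to the WWPN
 * 0x20000090fa012345.
 */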
1273 
1274 /**
1275  * @brief Return TRUE if the remote node is the point-to-point winner.
1276  *
1277  * @par Description
1278  * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
1279  * higher than the local node's WWPN.
1280  *
1281  * @param sport Pointer to the sport object.
1282  *
1283  * @return
1284  * - 0, if the remote node is the loser.
1285  * - 1, if the remote node is the winner.
1286  * - (-1), if remote node is neither the loser nor the winner
1287  *   (WWPNs match)
1288  */
1289 
1290 static int32_t
1291 ocs_rnode_is_winner(ocs_sport_t *sport)
1292 {
1293 	fc_plogi_payload_t *remote_sparms = (fc_plogi_payload_t*) sport->domain->flogi_service_params;
1294 	uint64_t remote_wwpn = ocs_get_wwpn(remote_sparms);
1295 	uint64_t local_wwpn = sport->wwpn;
1296 	char prop_buf[32];
1297 	uint64_t wwn_bump = 0;
1298 
1299 	if (ocs_get_property("wwn_bump", prop_buf, sizeof(prop_buf)) == 0) {
1300 		wwn_bump = ocs_strtoull(prop_buf, 0, 0);
1301 	}
1302 	local_wwpn ^= wwn_bump;
1303 
1304 	remote_wwpn = ocs_get_wwpn(remote_sparms);
1305 
1306 	ocs_log_debug(sport->ocs, "r: %08x %08x\n", ocs_be32toh(remote_sparms->port_name_hi), ocs_be32toh(remote_sparms->port_name_lo));
1307 	ocs_log_debug(sport->ocs, "l: %08x %08x\n", (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
1308 
1309 	if (remote_wwpn == local_wwpn) {
1310 		ocs_log_warn(sport->ocs, "WWPN of remote node [%08x %08x] matches local WWPN\n",
1311 			(uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
1312 		return (-1);
1313 	}
1314 
1315 	return (remote_wwpn > local_wwpn);
1316 }
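
/*
 * Winner rule, using hypothetical WWPNs: remote 0x2100000000000001 versus
 * local 0x2000000000000002 makes the remote node the winner because its WWPN
 * is numerically larger; equal WWPNs are flagged with a warning and -1 is
 * returned. The "wwn_bump" property XORs the local WWPN, presumably so the
 * comparison outcome can be altered for testing without changing the
 * adapter's programmed WWPN.
 */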
1317 
1318 /**
1319  * @ingroup p2p_sm
1320  * @brief Point-to-point state machine: Wait for the domain attach to complete.
1321  *
1322  * @par Description
1323  * Once the domain attach has completed, a PLOGI is sent (if we're the
1324  * winning point-to-point node).
1325  *
1326  * @param ctx Remote node state machine context.
1327  * @param evt Event to process.
1328  * @param arg Per event optional argument.
1329  *
1330  * @return Returns NULL.
1331  */
1332 
1333 void *
1334 __ocs_p2p_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1335 {
1336 	std_node_state_decl();
1337 
1338 	node_sm_trace();
1339 
1340 	switch(evt) {
1341 	case OCS_EVT_ENTER:
1342 		ocs_node_hold_frames(node);
1343 		break;
1344 
1345 	case OCS_EVT_EXIT:
1346 		ocs_node_accept_frames(node);
1347 		break;
1348 
1349 	case OCS_EVT_DOMAIN_ATTACH_OK: {
1350 		ocs_sport_t *sport = node->sport;
1351 		ocs_node_t *rnode;
1352 
1353 		/* this transient node (SID=0 (recv'd FLOGI) or DID=fabric (sent FLOGI))
1354 		 * is the p2p winner, will use a separate node to send PLOGI to peer
1355 		 */
1356 		ocs_assert (node->sport->p2p_winner, NULL);
1357 
1358 		rnode = ocs_node_find(sport, node->sport->p2p_remote_port_id);
1359 		if (rnode != NULL) {
1360 			/* the "other" transient p2p node has already kicked off the
1361 			 * new node from which PLOGI is sent */
1362 			node_printf(node, "Node with fc_id x%x already exists\n", rnode->rnode.fc_id);
1363 			ocs_assert (rnode != node, NULL);
1364 		} else {
1365 			/* create new node (SID=1, DID=2) from which to send PLOGI */
1366 			rnode = ocs_node_alloc(sport, sport->p2p_remote_port_id, FALSE, FALSE);
1367 			if (rnode == NULL) {
1368 				ocs_log_err(ocs, "node alloc failed\n");
1369 				return NULL;
1370 			}
1371 
1372 			ocs_fabric_notify_topology(node);
1373 			/* sm: allocate p2p remote node */
1374 			ocs_node_transition(rnode, __ocs_p2p_rnode_init, NULL);
1375 		}
1376 
1377 		/* the transient node (SID=0 or DID=fabric) has served its purpose */
1378 		if (node->rnode.fc_id == 0) {
1379 			/* if this is the SID=0 node, move to the init state in case peer
1380 			 * has restarted FLOGI discovery and FLOGI is pending
1381 			 */
1382 			/* don't send PLOGI on ocs_d_init entry */
1383 			ocs_node_init_device(node, FALSE);
1384 		} else {
1385 			/* if this is the DID=fabric node (we initiated FLOGI), shut it down */
1386 			node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1387 			ocs_fabric_initiate_shutdown(node);
1388 		}
1389 		break;
1390 	}
1391 
1392 	default:
1393 		__ocs_fabric_common(__func__, ctx, evt, arg);
1394 		return NULL;
1395 	}
1396 
1397 	return NULL;
1398 }
1399 
1400 /**
1401  * @ingroup p2p_sm
1402  * @brief Point-to-point state machine: Remote node initialization state.
1403  *
1404  * @par Description
1405  * This state is entered after winning point-to-point, and the remote node
1406  * is instantiated.
1407  *
1408  * @param ctx Remote node state machine context.
1409  * @param evt Event to process.
1410  * @param arg Per event optional argument.
1411  *
1412  * @return Returns NULL.
1413  */
1414 
1415 void *
1416 __ocs_p2p_rnode_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1417 {
1418 	ocs_node_cb_t *cbdata = arg;
1419 	std_node_state_decl();
1420 
1421 	node_sm_trace();
1422 
1423 	switch(evt) {
1424 	case OCS_EVT_ENTER:
1425 		/* sm: / send PLOGI */
1426 		ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1427 		ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp, NULL);
1428 		break;
1429 
1430 	case OCS_EVT_ABTS_RCVD:
1431 		/* sm: send BA_ACC */
1432 		ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1433 		break;
1434 
1435 	default:
1436 		__ocs_fabric_common(__func__, ctx, evt, arg);
1437 		return NULL;
1438 	}
1439 
1440 	return NULL;
1441 }
1442 
1443 /**
1444  * @ingroup p2p_sm
1445  * @brief Point-to-point node state machine: Wait for the FLOGI accept completion.
1446  *
1447  * @par Description
1448  * Wait for the FLOGI accept completion.
1449  *
1450  * @param ctx Remote node state machine context.
1451  * @param evt Event to process.
1452  * @param arg Per event optional argument.
1453  *
1454  * @return Returns NULL.
1455  */
1456 
1457 void *
1458 __ocs_p2p_wait_flogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1459 {
1460 	ocs_node_cb_t *cbdata = arg;
1461 	std_node_state_decl();
1462 
1463 	node_sm_trace();
1464 
1465 	switch(evt) {
1466 	case OCS_EVT_ENTER:
1467 		ocs_node_hold_frames(node);
1468 		break;
1469 
1470 	case OCS_EVT_EXIT:
1471 		ocs_node_accept_frames(node);
1472 		break;
1473 
1474 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1475 		ocs_assert(node->els_cmpl_cnt, NULL);
1476 		node->els_cmpl_cnt--;
1477 
1478 		/* sm: if p2p_winner / domain_attach */
1479 		if (node->sport->p2p_winner) {
1480 			ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
1481 			if (node->sport->domain->attached &&
1482 			    !(node->sport->domain->domain_notify_pend)) {
1483 				node_printf(node, "Domain already attached\n");
1484 				ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
1485 			}
1486 		} else {
1487 			/* this node has served its purpose; we'll expect a PLOGI on a separate
1488 			 * node (remote SID=0x1); return this node to init state in case peer
1489 			 * restarts discovery -- it may already have (pending frames may exist).
1490 			 */
1491 			/* don't send PLOGI on ocs_d_init entry */
1492 			ocs_node_init_device(node, FALSE);
1493 		}
1494 		break;
1495 
1496 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1497 		/* LS_ACC failed, possibly due to link down; shutdown node and wait
1498 		 * for FLOGI discovery to restart */
1499 		node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
1500 		ocs_assert(node->els_cmpl_cnt, NULL);
1501 		node->els_cmpl_cnt--;
1502 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1503 		ocs_fabric_initiate_shutdown(node);
1504 		break;
1505 
1506 	case OCS_EVT_ABTS_RCVD: {
1507 		/* sm: / send BA_ACC */
1508 		ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1509 		break;
1510 	}
1511 
1512 	default:
1513 		__ocs_fabric_common(__func__, ctx, evt, arg);
1514 		return NULL;
1515 	}
1516 
1517 	return NULL;
1518 }
1519 
1520 /**
1521  * @ingroup p2p_sm
1522  * @brief Point-to-point node state machine: Wait for a PLOGI response
1523  * as a point-to-point winner.
1524  *
1525  * @par Description
1526  * Wait for a PLOGI response from the remote node as a point-to-point winner.
1527  * Submit node attach request to the HW.
1528  *
1529  * @param ctx Remote node state machine context.
1530  * @param evt Event to process.
1531  * @param arg Per event optional argument.
1532  *
1533  * @return Returns NULL.
1534  */
1535 
1536 void *
1537 __ocs_p2p_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1538 {
1539 	int32_t rc;
1540 	ocs_node_cb_t *cbdata = arg;
1541 	std_node_state_decl();
1542 
1543 	node_sm_trace();
1544 
1545 	switch(evt) {
1546 	case OCS_EVT_SRRS_ELS_REQ_OK: {
1547 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1548 			return NULL;
1549 		}
1550 		ocs_assert(node->els_req_cnt, NULL);
1551 		node->els_req_cnt--;
1552 		/* sm: / save sparams, ocs_node_attach */
1553 		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1554 		rc = ocs_node_attach(node);
1555 		ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
1556 		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1557 			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1558 		}
1559 		break;
1560 	}
1561 	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
1562 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1563 			return NULL;
1564 		}
1565 		node_printf(node, "PLOGI failed, shutting down\n");
1566 		ocs_assert(node->els_req_cnt, NULL);
1567 		node->els_req_cnt--;
1568 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1569 		ocs_fabric_initiate_shutdown(node);
1570 		break;
1571 	}
1572 
1573 	case OCS_EVT_PLOGI_RCVD: {
1574 		fc_header_t *hdr = cbdata->header->dma.virt;
1575 		/* if we're in external loopback mode, just send LS_ACC */
1576 		if (node->ocs->external_loopback) {
1577 			ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1578 			break;
1579 		} else{
1580 			/* if this isn't external loopback, pass to default handler */
1581 			__ocs_fabric_common(__func__, ctx, evt, arg);
1582 		}
1583 		break;
1584 	}
1585 	case OCS_EVT_PRLI_RCVD:
1586 		/* I, or I+T */
1587 		/* sent PLOGI and before completion was seen, received the
1588 		 * PRLI from the remote node (WCQEs and RCQEs come in on
1589 		 * different queues and order of processing cannot be assumed)
1590 		 * Save OXID so PRLI can be sent after the attach and continue
1591 		 * to wait for PLOGI response
1592 		 */
1593 		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1594 		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1595 		ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp_recvd_prli, NULL);
1596 		break;
1597 	default:
1598 		__ocs_fabric_common(__func__, ctx, evt, arg);
1599 		return NULL;
1600 	}
1601 
1602 	return NULL;
1603 }
1604 
1605 /**
1606  * @ingroup p2p_sm
1607  * @brief Point-to-point node state machine: Waiting on a response for a
1608  *	sent PLOGI.
1609  *
1610  * @par Description
1611  * State is entered when the point-to-point winner has sent
1612  * a PLOGI and is waiting for a response. Before receiving the
1613  * response, a PRLI was received, implying that the PLOGI was
1614  * successful.
1615  *
1616  * @param ctx Remote node state machine context.
1617  * @param evt Event to process.
1618  * @param arg Per event optional argument.
1619  *
1620  * @return Returns NULL.
1621  */
1622 
1623 void *
1624 __ocs_p2p_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1625 {
1626 	int32_t rc;
1627 	ocs_node_cb_t *cbdata = arg;
1628 	std_node_state_decl();
1629 
1630 	node_sm_trace();
1631 
1632 	switch(evt) {
1633 	case OCS_EVT_ENTER:
1634 		/*
1635 		 * Since we've received a PRLI, we have a port login and will
1636 		 * just need to wait for the PLOGI response to do the node
1637 		 * attach and then we can send the LS_ACC for the PRLI. If,
1638 		 * during this time, we receive FCP_CMNDs (which is possible
1639 		 * since we've already sent a PRLI and our peer may have accepted
1640 		 * it), they are simply held as well. At this time, we are not
1641 		 * waiting on any other unsolicited frames to continue with the
1642 		 * login process, so it will not hurt to hold frames here.
1643 		 */
1644 		ocs_node_hold_frames(node);
1645 		break;
1646 
1647 	case OCS_EVT_EXIT:
1648 		ocs_node_accept_frames(node);
1649 		break;
1650 
1651 	case OCS_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
1652 		/* Completion from PLOGI sent */
1653 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1654 			return NULL;
1655 		}
1656 		ocs_assert(node->els_req_cnt, NULL);
1657 		node->els_req_cnt--;
1658 		/* sm: / save sparams, ocs_node_attach */
1659 		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1660 		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1661 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1662 		rc = ocs_node_attach(node);
1663 		ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
1664 		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1665 			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1666 		}
1667 		break;
1668 
1669 	case OCS_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
1670 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1671 		/* PLOGI failed, shutdown the node */
1672 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1673 			return NULL;
1674 		}
1675 		ocs_assert(node->els_req_cnt, NULL);
1676 		node->els_req_cnt--;
1677 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1678 		ocs_fabric_initiate_shutdown(node);
1679 		break;
1680 
1681 	default:
1682 		__ocs_fabric_common(__func__, ctx, evt, arg);
1683 		return NULL;
1684 	}
1685 
1686 	return NULL;
1687 }
1688 
1689 /**
1690  * @ingroup p2p_sm
1691  * @brief Point-to-point node state machine: Wait for a point-to-point node attach
1692  * to complete.
1693  *
1694  * @par Description
1695  * Waits for the point-to-point node attach to complete.
1696  *
1697  * @param ctx Remote node state machine context.
1698  * @param evt Event to process.
1699  * @param arg Per event optional argument.
1700  *
1701  * @return Returns NULL.
1702  */
1703 
1704 void *
1705 __ocs_p2p_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1706 {
1707 	ocs_node_cb_t *cbdata = arg;
1708 	std_node_state_decl();
1709 
1710 	node_sm_trace();
1711 
1712 	switch(evt) {
1713 	case OCS_EVT_ENTER:
1714 		ocs_node_hold_frames(node);
1715 		break;
1716 
1717 	case OCS_EVT_EXIT:
1718 		ocs_node_accept_frames(node);
1719 		break;
1720 
1721 	case OCS_EVT_NODE_ATTACH_OK:
1722 		node->attached = TRUE;
1723 		switch (node->send_ls_acc) {
1724 		case OCS_NODE_SEND_LS_ACC_PRLI: {
1725 			ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
1726 			node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1727 			node->ls_acc_io = NULL;
1728 			break;
1729 		}
1730 		case OCS_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
1731 		case OCS_NODE_SEND_LS_ACC_NONE:
1732 		default:
1733 			/* Normal case for I */
1734 			/* sm: send_plogi_acc is not set / send PLOGI acc */
1735 			ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
1736 			break;
1737 		}
1738 		break;
1739 
1740 	case OCS_EVT_NODE_ATTACH_FAIL:
1741 		/* node attach failed, shutdown the node */
1742 		node->attached = FALSE;
1743 		node_printf(node, "Node attach failed\n");
1744 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1745 		ocs_fabric_initiate_shutdown(node);
1746 		break;
1747 
1748 	case OCS_EVT_SHUTDOWN:
1749 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1750 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1751 		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1752 		break;
1753 	case OCS_EVT_PRLI_RCVD:
1754 		node_printf(node, "%s: PRLI received before node is attached\n", ocs_sm_event_name(evt));
1755 		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1756 		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1757 		break;
1758 	default:
1759 		__ocs_fabric_common(__func__, ctx, evt, arg);
1760 		return NULL;
1761 	}
1762 
1763 	return NULL;
1764 }
1765 
1766 /**
1767  * @brief Start up the name services node.
1768  *
1769  * @par Description
1770  * Allocates and starts up the name services node.
1771  *
1772  * @param sport Pointer to the sport structure.
1773  *
1774  * @return Returns 0 on success, or a negative error value on failure.
1775  */
1776 
1777 static int32_t
1778 ocs_start_ns_node(ocs_sport_t *sport)
1779 {
1780 	ocs_node_t *ns;
1781 
1782 	/* Instantiate a name services node */
1783 	ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
1784 	if (ns == NULL) {
1785 		ns = ocs_node_alloc(sport, FC_ADDR_NAMESERVER, FALSE, FALSE);
1786 		if (ns == NULL) {
1787 			return -1;
1788 		}
1789 	}
1790 	/* TODO: for a found ns node, should we be transitioning from here?
1791 	 * breaks transition only 1. from within state machine or
1792 	 * 2. if after alloc
1793 	 */
1794 	if (ns->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NAMESERVER) {
1795 		ocs_node_pause(ns, __ocs_ns_init);
1796 	} else {
1797 		ocs_node_transition(ns, __ocs_ns_init, NULL);
1798 	}
1799 	return 0;
1800 }
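
/*
 * Note: FC_ADDR_NAMESERVER is the well-known directory (name) server address
 * (0xFFFFFC in the FC standards).  Because the routine above looks up an
 * existing node before allocating one, it is safe to call again on
 * re-discovery; an existing name services node is simply re-used.
 */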
1801 
1802 /**
1803  * @brief Start up the fabric controller node.
1804  *
1805  * @par Description
1806  * Allocates and starts up the fabric controller node.
1807  *
1808  * @param sport Pointer to the sport structure.
1809  *
1810  * @return Returns 0 on success, or a negative error value on failure.
1811  */
1812 
1813 static int32_t
1814 ocs_start_fabctl_node(ocs_sport_t *sport)
1815 {
1816 	ocs_node_t *fabctl;
1817 
1818 	fabctl = ocs_node_find(sport, FC_ADDR_CONTROLLER);
1819 	if (fabctl == NULL) {
1820 		fabctl = ocs_node_alloc(sport, FC_ADDR_CONTROLLER, FALSE, FALSE);
1821 		if (fabctl == NULL) {
1822 			return -1;
1823 		}
1824 	}
1825 	/* TODO: if the fabctl node already existed, should we be transitioning it
1826 	 * from here?  Doing so breaks the convention that transitions happen only
1827 	 * (1) from within the state machine, or (2) immediately after allocation.
1828 	 */
1829 	ocs_node_transition(fabctl, __ocs_fabctl_init, NULL);
1830 	return 0;
1831 }
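
/*
 * Similarly, FC_ADDR_CONTROLLER is the well-known fabric controller address
 * (0xFFFFFD), and the find-before-alloc pattern above keeps this routine safe
 * to call when the fabric controller node already exists.
 */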
1832 
1833 /**
1834  * @brief Process the GIDPT payload.
1835  *
1836  * @par Description
1837  * The GIDPT payload is parsed, and new nodes are created, as needed.
1838  *
1839  * @param node Pointer to the node structure.
1840  * @param gidpt Pointer to the GIDPT payload.
1841  * @param gidpt_len Payload length, in bytes.
1842  *
1843  * @return Returns 0 on success, or a negative error value on failure.
1844  */
1845 
1846 static int32_t
1847 ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len)
1848 {
1849 	uint32_t i;
1850 	uint32_t j;
1851 	ocs_node_t *newnode;
1852 	ocs_sport_t *sport = node->sport;
1853 	ocs_t *ocs = node->ocs;
1854 	uint32_t port_id;
1855 	uint32_t port_count;
1856 	ocs_node_t *n;
1857 	ocs_node_t **active_nodes;
1858 	uint32_t portlist_count;
1859 	uint16_t residual;
1860 
1861 	residual = ocs_be16toh(gidpt->hdr.max_residual_size);
1862 
1863 	if (residual != 0) {
1864 		ocs_log_debug(node->ocs, "residual is %u words\n", residual);
1865 	}
1866 
1867 	if (ocs_be16toh(gidpt->hdr.cmd_rsp_code) == FCCT_HDR_CMDRSP_REJECT) {
1868 		node_printf(node, "GIDPT request failed: rsn x%x rsn_expl x%x\n",
1869 			gidpt->hdr.reason_code, gidpt->hdr.reason_code_explanation);
1870 		return -1;
1871 	}
1872 
1873 	portlist_count = (gidpt_len - sizeof(fcct_iu_header_t)) / sizeof(gidpt->port_list);
1874 
1875 	/* Count the number of nodes */
1876 	port_count = 0;
1877 	ocs_sport_lock(sport);
1878 		ocs_list_foreach(&sport->node_list, n) {
1879 			port_count ++;
1880 		}
1881 
1882 		/* Allocate a buffer for all nodes */
1883 		active_nodes = ocs_malloc(node->ocs, port_count * sizeof(*active_nodes), OCS_M_NOWAIT | OCS_M_ZERO);
1884 		if (active_nodes == NULL) {
1885 			node_printf(node, "ocs_malloc failed\n");
1886 			ocs_sport_unlock(sport);
1887 			return -1;
1888 		}
1889 
1890 		/* Fill buffer with fc_id of active nodes */
1891 		i = 0;
1892 		ocs_list_foreach(&sport->node_list, n) {
1893 			port_id = n->rnode.fc_id;
1894 			switch (port_id) {
1895 			case FC_ADDR_FABRIC:
1896 			case FC_ADDR_CONTROLLER:
1897 			case FC_ADDR_NAMESERVER:
1898 				break;
1899 			default:
1900 				if (!FC_ADDR_IS_DOMAIN_CTRL(port_id)) {
1901 					active_nodes[i++] = n;
1902 				}
1903 				break;
1904 			}
1905 		}
1906 
1907 		/* Clear active_nodes[] entries for ports still present in the GID_PT response */
1908 		for (i = 0; i < portlist_count; i ++) {
1909 			port_id = fc_be24toh(gidpt->port_list[i].port_id);
1910 
1911 			for (j = 0; j < port_count; j ++) {
1912 				if ((active_nodes[j] != NULL) && (port_id == active_nodes[j]->rnode.fc_id)) {
1913 					active_nodes[j] = NULL;
1914 				}
1915 			}
1916 
1917 			if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
1918 				break;
1919 		}
1920 
1921 		/* Nodes still present in active_nodes[] are no longer on the fabric */
1922 		for (i = 0; i < port_count; i ++) {
1923 			/* If we're an initiator and the remote node is a target, or we're a
1924 			 * target and target RSCN handling is enabled, post the node missing
1925 			 * event.
1926 			 */
1927 			if (active_nodes[i] != NULL) {
1928 				if ((node->sport->enable_ini && active_nodes[i]->targ) ||
1929 				    (node->sport->enable_tgt && enable_target_rscn(ocs))) {
1930 					ocs_node_post_event(active_nodes[i], OCS_EVT_NODE_MISSING, NULL);
1931 				} else {
1932 					node_printf(node, "GID_PT: skipping non-tgt port_id x%06x\n",
1933 						active_nodes[i]->rnode.fc_id);
1934 				}
1935 			}
1936 		}
1937 		ocs_free(ocs, active_nodes, port_count * sizeof(*active_nodes));
1938 
1939 		for(i = 0; i < portlist_count; i ++) {
1940 			uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);
1941 
1942 			/* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
1943 
1944 			/* Don't create node for ourselves or the associated NPIV ports */
1945 			if (port_id != node->rnode.sport->fc_id && !ocs_sport_find(sport->domain, port_id)) {
1946 				newnode = ocs_node_find(sport, port_id);
1947 				if (newnode) {
1948 					/* TODO: what if the node is deleted here? */
1949 					if (node->sport->enable_ini && newnode->targ) {
1950 						ocs_node_post_event(newnode, OCS_EVT_NODE_REFOUND, NULL);
1951 					}
1952 					/* original code sends ADISC, has notion of "refound" */
1953 				} else {
1954 					if (node->sport->enable_ini) {
1955 						newnode = ocs_node_alloc(sport, port_id, 0, 0);
1956 						if (newnode == NULL) {
1957 							ocs_log_err(ocs, "ocs_node_alloc() failed\n");
1958 							ocs_sport_unlock(sport);
1959 							return -1;
1960 						}
1961 						/* send PLOGI automatically if initiator */
1962 						ocs_node_init_device(newnode, TRUE);
1963 					}
1964 				}
1965 			}
1966 
1967 			if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID) {
1968 				break;
1969 			}
1970 		}
1971 	ocs_sport_unlock(sport);
1972 	return 0;
1973 }
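
/*
 * A minimal sketch of how a single GID_PT accept entry is decoded, using only
 * the helpers already referenced above (fc_be24toh, FCCT_GID_PT_LAST_ID):
 * each port_list entry carries an 8-bit control byte and a 24-bit big-endian
 * port ID, and the FCCT_GID_PT_LAST_ID bit in the control byte marks the
 * final entry of the response.  The helper name below is illustrative only.
 *
 *	static int
 *	gidpt_entry_example(fcct_gidpt_acc_t *gidpt, uint32_t i, uint32_t *port_id)
 *	{
 *		*port_id = fc_be24toh(gidpt->port_list[i].port_id);
 *		return ((gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID) != 0);
 *	}
 */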
1974 
1975 /**
1976  * @brief Set up the domain point-to-point parameters.
1977  *
1978  * @par Description
1979  * The remote node service parameters are examined, and various point-to-point
1980  * variables are set.
1981  *
1982  * @param sport Pointer to the sport object.
1983  *
1984  * @return Returns 0 on success, or a negative error value on failure.
1985  */
1986 
1987 int32_t
1988 ocs_p2p_setup(ocs_sport_t *sport)
1989 {
1990 	ocs_t *ocs = sport->ocs;
1991 	int32_t rnode_winner;
1992 	rnode_winner = ocs_rnode_is_winner(sport);
1993 
1994 	/* set sport flags to indicate p2p "winner" */
1995 	if (rnode_winner == 1) {
1996 		sport->p2p_remote_port_id = 0;
1997 		sport->p2p_port_id = 0;
1998 		sport->p2p_winner = FALSE;
1999 	} else if (rnode_winner == 0) {
2000 		sport->p2p_remote_port_id = 2;
2001 		sport->p2p_port_id = 1;
2002 		sport->p2p_winner = TRUE;
2003 	} else {
2004 		/* no winner; only okay if external loopback enabled */
2005 		if (sport->ocs->external_loopback) {
2006 			/*
2007 			 * External loopback mode is enabled; the local sport and the
2008 			 * remote node will both be registered with N_Port ID 1.
2009 			 */
2010 			ocs_log_debug(ocs, "External loopback mode enabled\n");
2011 			sport->p2p_remote_port_id = 1;
2012 			sport->p2p_port_id = 1;
2013 			sport->p2p_winner = TRUE;
2014 		} else {
2015 			ocs_log_warn(ocs, "failed to determine p2p winner\n");
2016 			return rnode_winner;
2017 		}
2018 	}
2019 	return 0;
2020 }
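
/*
 * Summary of the assignments above: when the remote node wins, the local
 * sport records no N_Port IDs (the winner assigns them); when the local sport
 * wins, it takes N_Port ID 1 and assigns ID 2 to the remote node; when no
 * winner can be determined, external loopback is the only supported
 * configuration and both ends use N_Port ID 1.
 */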
2021 
2022 /**
2023  * @brief Process the FABCTL node RSCN.
2024  *
2025  * @par Description
2026  * Processes the FABCTL node RSCN payload by forwarding the event to the name services node.
2027  *
2028  * @param node Pointer to the node structure.
2029  * @param cbdata Callback data to pass forward.
2030  *
2031  * @return None.
2032  */
2033 
2034 static void
2035 ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata)
2036 {
2037 	ocs_t *ocs = node->ocs;
2038 	ocs_sport_t *sport = node->sport;
2039 	ocs_node_t *ns;
2040 
2041 	/* Forward this event to the name-services node */
2042 	ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
2043 	if (ns != NULL) {
2044 		ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, cbdata);
2045 	} else {
2046 		ocs_log_warn(ocs, "can't find name server node\n");
2047 	}
2048 }
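
/*
 * RSCN frames arrive on the fabric controller node, but in this driver the
 * subsequent re-discovery (the GID_PT query handled by
 * ocs_process_gidpt_payload above) is driven from the name services node's
 * state machine, so the event is simply forwarded there.
 */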
2049