// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_plog.h"
#include "bfa_cs.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCXP);

/*
 * LPS related definitions
 */
#define BFA_LPS_MIN_LPORTS      (1)
#define BFA_LPS_MAX_LPORTS      (256)

/*
 * Maximum Vports supported per physical port or vf.
 */
#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
#define BFA_LPS_MAX_VPORTS_SUPP_CT  190


/*
 * FC PORT related definitions
 */
/*
 * The port is considered disabled if the corresponding physical port or
 * IOC is disabled explicitly.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))

/*
 * RPORT related definitions
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)

/*
 * forward declarations FCXP related functions
 */
static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void	bfa_fcxp_qresume(void *cbarg);
static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_req_s *send_req);

/*
 * forward declarations for LPS functions
 */
static void bfa_lps_login_rsp(struct bfa_s *bfa,
				struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
				struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
static void bfa_lps_free(struct bfa_lps_s *lps);
static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);

/*
 * forward declaration for LPS state machine
 */
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);
static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
					enum bfa_lps_event event);
static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);

/*
 * forward declaration for FC Port functions
 */
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
			enum bfa_port_linkstate event, bfa_boolean_t trunk);
static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
				enum bfa_port_linkstate event);
static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_stats_get_timeout(void *cbarg);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);

/*
 * forward declaration for FC PORT state machine
 */
static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);

static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);

struct bfa_fcport_sm_table_s {
	bfa_fcport_sm_t sm;		/*  state machine function	*/
	enum bfa_port_states state;	/*  state machine encoding	*/
	char		*name;		/*  state name for display	*/
};

static inline enum bfa_port_states
bfa_fcport_sm_to_state(struct bfa_fcport_sm_table_s *smt, bfa_fcport_sm_t sm)
{
	int i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}

static struct bfa_fcport_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
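
/*
 * Usage sketch for the lookup above (illustrative only; this is the
 * pattern port-attribute code elsewhere in the driver is expected to
 * follow). Given a port's current state-machine function pointer,
 * recover the encoded state:
 *
 *	enum bfa_port_states st;
 *
 *	st = bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm);
 *
 * Note that the scan assumes the function pointer is present in the
 * table; there is no sentinel entry to stop an unmatched search.
 */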


/*
 * forward declaration for RPORT related functions
 */
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void		bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void		__bfa_cb_rport_online(void *cbarg,
						bfa_boolean_t complete);
static void		__bfa_cb_rport_offline(void *cbarg,
						bfa_boolean_t complete);

/*
 * forward declaration for RPORT state machine
 */
static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);

/*
 * PLOG related definitions
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}

static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	pl_recp->tv = ktime_get_real_seconds();
	BFA_PL_LOG_REC_INCR(plog->tail);

	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
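
/*
 * The ring above overwrites the oldest entries once it fills: tail is
 * bumped on every add, and head is bumped only when tail catches up to
 * it. A minimal sketch of the wrap logic, assuming BFA_PL_LOG_REC_INCR
 * is a wrapping increment over the plog_recs[] array:
 *
 *	tail = (tail + 1) % ARRAY_SIZE(plog->plog_recs);
 *	if (head == tail)	// full: drop the oldest record
 *		head = (head + 1) % ARRAY_SIZE(plog->plog_recs);
 */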

void
bfa_plog_init(struct bfa_plog_s *plog)
{
	memset((char *)plog, 0, sizeof(struct bfa_plog_s));

	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
	plog->head = plog->tail = 0;
	plog->plog_enabled = 1;
}

void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, char *log_str)
{
	struct bfa_plog_rec_s  lp;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_STRING;
		lp.misc = misc;
		strscpy(lp.log_entry.string_log, log_str,
			BFA_PL_STRING_LOG_SZ);
		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
		bfa_plog_add(plog, &lp);
	}
}

void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s  lp;
	u32 i;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			lp.log_entry.int_log[i] = intarr[i];

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}

void
bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event,
			u16 misc, struct fchs_s *fchdr)
{
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];

		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
	}
}

void
bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
		      u32 pld_w0)
{
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];
		ints[3] = pld_w0;

		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
	}
}


/*
 *  fcxp_pvt BFA FCXP private functions
 */

static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
	u16	i;
	struct bfa_fcxp_s *fcxp;

	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
	INIT_LIST_HEAD(&mod->fcxp_active_q);
	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);

	mod->fcxp_list = fcxp;

	for (i = 0; i < mod->num_fcxps; i++) {
		fcxp->fcxp_mod = mod;
		fcxp->fcxp_tag = i;

		if (i < (mod->num_fcxps / 2)) {
			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
			fcxp->req_rsp = BFA_TRUE;
		} else {
			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
			fcxp->req_rsp = BFA_FALSE;
		}

		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
		fcxp->reqq_waiting = BFA_FALSE;

		fcxp = fcxp + 1;
	}

	bfa_mem_kva_curp(mod) = (void *)fcxp;
}

void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}

void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->req_wait_q);
	INIT_LIST_HEAD(&mod->rsp_wait_q);

	claim_fcxps_mem(mod);
}

void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head	      *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}

static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
{
	struct bfa_fcxp_s *fcxp;

	if (req)
		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
	else
		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);

	if (fcxp)
		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

	return fcxp;
}

static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		WARN_ON(*sga_cbfn == NULL);
		WARN_ON(*sglen_cbfn == NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			WARN_ON(1);
	}

}

static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}

static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	if (fcxp->req_rsp)
		bfa_q_deq(&mod->req_wait_q, &wqe);
	else
		bfa_q_deq(&mod->rsp_wait_q, &wqe);

	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);

	if (fcxp->req_rsp)
		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
	else
		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}

static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}

static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcxp_s *fcxp = cbarg;

	if (complete) {
		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
				fcxp->rsp_status, fcxp->rsp_len,
				fcxp->residue_len, &fcxp->rsp_fchs);
	} else {
		bfa_fcxp_free(fcxp);
	}
}

static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}

static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
		 struct fchs_s *fchs)
{
	/*
	 * TODO: TX ox_id
	 */
	if (reqlen > 0) {
		if (fcxp->use_ireqbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s), fchs,
					pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s),
					fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
			       reqlen + sizeof(struct fchs_s), fchs);
	}
}

static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	if (fcxp_rsp->rsp_len > 0) {
		if (fcxp->use_irspbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					      BFA_PL_EID_RX,
					      (u16) fcxp_rsp->rsp_len,
					      &fcxp_rsp->fchs, pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
				       BFA_PL_EID_RX,
				       (u16) fcxp_rsp->rsp_len,
				       &fcxp_rsp->fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
	}
}

/*
 * Handler to resume sending fcxp when space is available in the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Queue fcxp send request to firmware.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_fn_lpu(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			WARN_ON(fcxp->nreq_sgles != 1);
			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
				fcxp->req_sga_cbfn(fcxp->caller, 0));
		} else {
			WARN_ON(reqi->req_tot_len != 0);
			/* zero-length request: clear the request alen */
			bfa_alen_set(&send_req->req_alen, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);

		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));
	} else {
		if (fcxp->nrsp_sgles > 0) {
			WARN_ON(fcxp->nrsp_sgles != 1);
			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
				fcxp->rsp_sga_cbfn(fcxp->caller, 0));

		} else {
			WARN_ON(rspi->rsp_maxlen != 0);
			bfa_alen_set(&send_req->rsp_alen, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}

/*
 * Allocate an FCXP instance to send a response or to send a request
 * that has a response. Request/response buffers are allocated by caller.
 *
 * @param[in]	bfa		BFA bfa instance
 * @param[in]	nreq_sgles	Number of SG elements required for request
 *				buffer. 0, if fcxp internal buffers are used.
 *				Use bfa_fcxp_get_reqbuf() to get the
 *				internal req buffer.
 * @param[in]	req_sgles	SG elements describing request buffer. Will be
 *				copied in by BFA and hence can be freed on
 *				return from this function.
 * @param[in]	get_req_sga	function ptr to be called to get a request SG
 *				Address (given the sge index).
 * @param[in]	get_req_sglen	function ptr to be called to get a request SG
 *				len (given the sge index).
 * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
 *				Address (given the sge index).
 * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
 *				len (given the sge index).
 * @param[in]	req		Allocated FCXP is used to send req or rsp?
 *				request - BFA_TRUE, response - BFA_FALSE
 *
 * @return FCXP instance. NULL on failure.
 */
struct bfa_fcxp_s *
bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
		int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		bfa_fcxp_get_sglen_t req_sglen_cbfn,
		bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
	struct bfa_fcxp_s *fcxp = NULL;

	WARN_ON(bfa == NULL);

	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
	if (fcxp == NULL)
		return NULL;

	bfa_trc(bfa, fcxp->fcxp_tag);

	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);

	return fcxp;
}
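
/*
 * Usage sketch (hypothetical caller, not code from this driver):
 * allocate an FCXP that uses the internal request buffer
 * (nreq_sgles == 0) for a request that expects a response; my_lport is
 * a placeholder cookie passed back as "caller".
 *
 *	struct bfa_fcxp_s *fcxp;
 *	void *pld;
 *
 *	fcxp = bfa_fcxp_req_rsp_alloc(my_lport, bfa, 0, 0,
 *				      NULL, NULL, NULL, NULL, BFA_TRUE);
 *	if (fcxp == NULL)
 *		return;	// pool empty; see bfa_fcxp_req_rsp_alloc_wait()
 *	pld = bfa_fcxp_get_reqbuf(fcxp);
 *	// build the request payload in pld, then bfa_fcxp_send()
 */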

/*
 * Get the internal request buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal request buffer
 */
void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*reqbuf;

	WARN_ON(fcxp->use_ireqbuf != 1);
	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				mod->req_pld_sz + mod->rsp_pld_sz);
	return reqbuf;
}

u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	return mod->req_pld_sz;
}

/*
 * Get the internal response buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal response buffer
 */
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*fcxp_buf;

	WARN_ON(fcxp->use_irspbuf != 1);

	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				mod->req_pld_sz + mod->rsp_pld_sz);

	/* fcxp_buf = req_buf + rsp_buf: add req_buf_sz to get to rsp_buf */
	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
}

/*
 * Free the BFA FCXP
 *
 * @param[in]	fcxp			BFA fcxp pointer
 *
 * @return		void
 */
void
bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	WARN_ON(fcxp == NULL);
	bfa_trc(mod->bfa, fcxp->fcxp_tag);
	bfa_fcxp_put(fcxp);
}

/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 *
 * @param[in]	cbfn	call back function to be called on receiving
 *								the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_timeout
 *			response timeout
 *
 * @return		void
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
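
/*
 * Send sketch, continuing the allocation example above (my_comp_cbfn,
 * my_cbarg, rsp_maxlen and rsp_tov are placeholders). If the request
 * queue is full, bfa_fcxp_send() parks the FCXP on its reqq wait
 * element and resends from bfa_fcxp_qresume(), so callers need not
 * handle back-pressure themselves:
 *
 *	bfa_fcxp_send(fcxp, rport, vf_id, lp_tag,
 *		      BFA_FALSE,		// cts
 *		      FC_CLASS_3, reqlen, &fchs,
 *		      my_comp_cbfn, my_cbarg,	// invoked on response
 *		      rsp_maxlen, rsp_tov);
 */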

/*
 * Abort a BFA FCXP
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		bfa_status_t
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	WARN_ON(1);
	return BFA_STATUS_OK;
}

void
bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
	       void *caller, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	if (req)
		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
	else
		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	if (req)
		list_add_tail(&wqe->qe, &mod->req_wait_q);
	else
		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}
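
/*
 * When the free list is exhausted, callers queue a wait element
 * instead of polling; alloc_cbfn fires from bfa_fcxp_put() as soon as
 * an FCXP returns to the pool. Sketch with hypothetical names:
 *
 *	static void
 *	my_alloc_cbfn(void *cbarg, struct bfa_fcxp_s *fcxp)
 *	{
 *		// fcxp arrives initialized with the parameters below
 *	}
 *
 *	bfa_fcxp_req_rsp_alloc_wait(bfa, &my_wqe, my_alloc_cbfn,
 *				    my_cbarg, my_lport, 0, 0,
 *				    NULL, NULL, NULL, NULL, BFA_TRUE);
 */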

void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* warn if the wqe is on neither wait queue */
	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) &&
		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
	list_del(&wqe->qe);
}

void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	fcxp->send_cbfn = bfa_fcxp_null_comp;
}

void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}

u32
bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	return mod->rsp_pld_sz;
}

void
bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_req_unused_q);
		} else {
			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
		}
	}
}

/*
 *  BFA LPS state machine functions
 */
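
/*
 * Rough transition map for the handlers below (distilled from the
 * code; the *wait states exist only to absorb request-queue
 * back-pressure):
 *
 *	init      --LOGIN-->  login (loginwait if reqq full)
 *	login     --FWRSP-->  online on success, back to init on failure
 *	online    --LOGOUT--> logout (logowait if reqq full)
 *	online    --SET_N2N_PID--> online_n2n_pid_wait if reqq full
 *	logout    --FWRSP-->  init
 *	any state --OFFLINE/DELETE--> init (DELETE from init frees the lps)
 *
 * RX_CVL in the online states returns to init and notifies the vport
 * module via bfa_lps_cvl_event().
 */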

/*
 * Init state -- no login
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when the fabric detects a loopback and
		 * discards the lps request. Firmware will eventually send
		 * out a timeout. Just ignore it.
		 */
		break;
	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete -- N2N PID set pending, awaiting space in request queue
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}



/*
 *  lps_pvt BFA LPS private functions
 */

/*
 * return memory requirement
 */
void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);

	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
	else
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
}

/*
 * bfa module attach at initialization time
 */
void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	struct bfa_pcidev_s *pcidev)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	mod->num_lps = BFA_LPS_MAX_LPORTS;
	if (cfg->drvcfg.min_cfg)
		mod->num_lps = BFA_LPS_MIN_LPORTS;
	else
		mod->num_lps = BFA_LPS_MAX_LPORTS;
	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);

	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);

	INIT_LIST_HEAD(&mod->lps_free_q);
	INIT_LIST_HEAD(&mod->lps_active_q);
	INIT_LIST_HEAD(&mod->lps_login_q);

	for (i = 0; i < mod->num_lps; i++, lps++) {
		lps->bfa	= bfa;
		lps->bfa_tag	= (u8) i;
		lps->reqq	= BFA_REQQ_LPS;
		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
		list_add_tail(&lps->qe, &mod->lps_free_q);
	}
}

/*
 * IOC in disabled state -- consider all lps offline
 */
void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}

/*
 * Firmware login response
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do for other status values */
		break;
	}

	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

static void
bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
{
	struct bfa_s		*bfa = first_lps->bfa;
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct list_head	*qe, *qe_next;
	struct bfa_lps_s	*lps;

	bfa_trc(bfa, count);

	qe = bfa_q_next(first_lps);

	while (count && qe) {
		qe_next = bfa_q_next(qe);
		lps = (struct bfa_lps_s *)qe;
		bfa_trc(bfa, lps->bfa_tag);
		lps->status = first_lps->status;
		list_del(&lps->qe);
		list_add_tail(&lps->qe, &mod->lps_active_q);
		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
		qe = qe_next;
		count--;
	}
}

/*
 * Firmware logout response
 */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

/*
 * Firmware received a Clear virtual link request (for FCoE)
 */
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}

/*
 * Space is available in request queue, resume queueing request to firmware.
 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	struct bfa_lps_s	*lps = lps_arg;

	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}

/*
 * lps is freed -- triggered by vport delete
 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);

	lps->lp_pid = 0;
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}

/*
 * send login request to firmware
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	m->bfa_tag	= lps->bfa_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}

/*
 * send logout request to firmware
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}

/*
 * send n2n pid set request to firmware
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}

/*
 * Indirect login completion handler for non-fcs
 */
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Login completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
			lps);
		return;
	}

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Indirect logout completion handler for non-fcs
 */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
	else
		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Logout completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Clear virtual link completion handler for non-fcs
 */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}

/*
 * Received Clear virtual link event -- direct call for fcs,
 * queue for others
 */
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
			lps);
		return;
	}

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}



/*
 *  lps_public BFA LPS public functions
 */

u32
bfa_lps_get_max_vport(struct bfa_s *bfa)
{
	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
		return BFA_LPS_MAX_VPORTS_SUPP_CT;
	else
		return BFA_LPS_MAX_VPORTS_SUPP_CB;
}
/*
 * Allocate a lport service tag.
 */
struct bfa_lps_s  *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}

/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}

/*
 * Initiate a lport login.
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;
	lps->auth_en	= auth_en;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport fdisc login.
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg	= uarg;
	lps->alpa	= 0;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
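
/*
 * Typical NPIV lifecycle sketch (hypothetical vport code; completion
 * is reported through bfa_cb_lps_fdisc_comp() for fcs callers, or via
 * the queued callback otherwise):
 *
 *	struct bfa_lps_s *lps = bfa_lps_alloc(bfa);
 *
 *	if (lps == NULL)
 *		return;	// all lport service tags in use
 *	bfa_lps_fdisc(lps, my_uarg, pdusz, vport_pwwn, vport_nwwn);
 *	// ... traffic runs after completion; later:
 *	bfa_lps_fdisclogo(lps);
 *	// ... and once logout completes:
 *	bfa_lps_delete(lps);
 */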


/*
 * Initiate a lport FDISC logout.
 */
1894 void
1895 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1896 {
1897 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1898 }
1899 
1900 u8
1901 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1902 {
1903 	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1904 
1905 	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1906 }
1907 
1908 /*
1909  * Return lport services tag given the pid
1910  */
1911 u8
1912 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1913 {
1914 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1915 	struct bfa_lps_s	*lps;
1916 	int			i;
1917 
1918 	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1919 		if (lps->lp_pid == pid)
1920 			return lps->bfa_tag;
1921 	}
1922 
1923 	/* Return base port tag anyway */
1924 	return 0;
1925 }
1926 
1927 
1928 /*
1929  * return port id assigned to the base lport
1930  */
1931 u32
1932 bfa_lps_get_base_pid(struct bfa_s *bfa)
1933 {
1934 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1935 
1936 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1937 }
1938 
1939 /*
1940  * Set PID in case of n2n (which is assigned during PLOGI)
1941  */
1942 void
1943 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
1944 {
1945 	bfa_trc(lps->bfa, lps->bfa_tag);
1946 	bfa_trc(lps->bfa, n2n_pid);
1947 
1948 	lps->lp_pid = n2n_pid;
1949 	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1950 }
1951 
1952 /*
1953  * LPS firmware message class handler.
1954  */
1955 void
1956 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1957 {
1958 	union bfi_lps_i2h_msg_u	msg;
1959 
1960 	bfa_trc(bfa, m->mhdr.msg_id);
1961 	msg.msg = m;
1962 
1963 	switch (m->mhdr.msg_id) {
1964 	case BFI_LPS_I2H_LOGIN_RSP:
1965 		bfa_lps_login_rsp(bfa, msg.login_rsp);
1966 		break;
1967 
1968 	case BFI_LPS_I2H_LOGOUT_RSP:
1969 		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
1970 		break;
1971 
1972 	case BFI_LPS_I2H_CVL_EVENT:
1973 		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
1974 		break;
1975 
1976 	default:
1977 		bfa_trc(bfa, m->mhdr.msg_id);
1978 		WARN_ON(1);
1979 	}
1980 }
1981 
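/*
 * Post a port asynchronous event notification (AEN) to the driver.
 */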
1982 static void
1983 bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
1984 {
1985 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
1986 	struct bfa_aen_entry_s  *aen_entry;
1987 
1988 	bfad_get_aen_entry(bfad, aen_entry);
1989 	if (!aen_entry)
1990 		return;
1991 
1992 	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
1993 	aen_entry->aen_data.port.pwwn = fcport->pwwn;
1994 
1995 	/* Send the AEN notification */
1996 	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
1997 				  BFA_AEN_CAT_PORT, event);
1998 }
1999 
2000 /*
2001  * FC PORT state machine functions
2002  */
2003 static void
2004 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2005 			enum bfa_fcport_sm_event event)
2006 {
2007 	bfa_trc(fcport->bfa, event);
2008 
2009 	switch (event) {
2010 	case BFA_FCPORT_SM_START:
2011 		/*
2012 		 * Start event after IOC is configured and BFA is started.
2013 		 */
2014 		fcport->use_flash_cfg = BFA_TRUE;
2015 
2016 		if (bfa_fcport_send_enable(fcport)) {
2017 			bfa_trc(fcport->bfa, BFA_TRUE);
2018 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2019 		} else {
2020 			bfa_trc(fcport->bfa, BFA_FALSE);
2021 			bfa_sm_set_state(fcport,
2022 					bfa_fcport_sm_enabling_qwait);
2023 		}
2024 		break;
2025 
2026 	case BFA_FCPORT_SM_ENABLE:
2027 		/*
2028 		 * Port is persistently configured to be in enabled state. Do
2029 		 * not change state. Port enabling is done when START event is
2030 		 * received.
2031 		 */
2032 		break;
2033 
2034 	case BFA_FCPORT_SM_DISABLE:
2035 		/*
2036 		 * If a port is persistently configured to be disabled, the
2037 		 * first event will be a port disable request.
2038 		 */
2039 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2040 		break;
2041 
2042 	case BFA_FCPORT_SM_HWFAIL:
2043 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2044 		break;
2045 
2046 	default:
2047 		bfa_sm_fault(fcport->bfa, event);
2048 	}
2049 }
2050 
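/*
 * Port enable request is waiting for room in the request queue.
 */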
2051 static void
2052 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2053 				enum bfa_fcport_sm_event event)
2054 {
2055 	char pwwn_buf[BFA_STRING_32];
2056 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2057 	bfa_trc(fcport->bfa, event);
2058 
2059 	switch (event) {
2060 	case BFA_FCPORT_SM_QRESUME:
2061 		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2062 		bfa_fcport_send_enable(fcport);
2063 		break;
2064 
2065 	case BFA_FCPORT_SM_STOP:
2066 		bfa_reqq_wcancel(&fcport->reqq_wait);
2067 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2068 		break;
2069 
2070 	case BFA_FCPORT_SM_ENABLE:
2071 		/*
2072 		 * Enable is already in progress.
2073 		 */
2074 		break;
2075 
2076 	case BFA_FCPORT_SM_DISABLE:
2077 		/*
2078 		 * The enable request has not been sent to the firmware yet;
2079 		 * just cancel the queue wait and mark the port disabled.
2080 		 */
2081 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2082 		bfa_reqq_wcancel(&fcport->reqq_wait);
2083 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2084 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2085 		wwn2str(pwwn_buf, fcport->pwwn);
2086 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2087 			"Base port disabled: WWN = %s\n", pwwn_buf);
2088 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2089 		break;
2090 
2091 	case BFA_FCPORT_SM_LINKUP:
2092 	case BFA_FCPORT_SM_LINKDOWN:
2093 		/*
2094 		 * Possible to get link events when doing back-to-back
2095 		 * enable/disables.
2096 		 */
2097 		break;
2098 
2099 	case BFA_FCPORT_SM_HWFAIL:
2100 		bfa_reqq_wcancel(&fcport->reqq_wait);
2101 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2102 		break;
2103 
2104 	case BFA_FCPORT_SM_FAA_MISCONFIG:
2105 		bfa_fcport_reset_linkinfo(fcport);
2106 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2107 		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2108 		break;
2109 
2110 	default:
2111 		bfa_sm_fault(fcport->bfa, event);
2112 	}
2113 }
2114 
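/*
 * Port enable request has been sent to the firmware; waiting for a
 * response or link event.
 */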
2115 static void
2116 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2117 						enum bfa_fcport_sm_event event)
2118 {
2119 	char pwwn_buf[BFA_STRING_32];
2120 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2121 	bfa_trc(fcport->bfa, event);
2122 
2123 	switch (event) {
2124 	case BFA_FCPORT_SM_FWRSP:
2125 	case BFA_FCPORT_SM_LINKDOWN:
2126 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2127 		break;
2128 
2129 	case BFA_FCPORT_SM_LINKUP:
2130 		bfa_fcport_update_linkinfo(fcport);
2131 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2132 
2133 		WARN_ON(!fcport->event_cbfn);
2134 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2135 		break;
2136 
2137 	case BFA_FCPORT_SM_ENABLE:
2138 		/*
2139 		 * Already being enabled.
2140 		 */
2141 		break;
2142 
2143 	case BFA_FCPORT_SM_DISABLE:
2144 		if (bfa_fcport_send_disable(fcport))
2145 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2146 		else
2147 			bfa_sm_set_state(fcport,
2148 					 bfa_fcport_sm_disabling_qwait);
2149 
2150 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2151 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2152 		wwn2str(pwwn_buf, fcport->pwwn);
2153 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2154 			"Base port disabled: WWN = %s\n", pwwn_buf);
2155 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2156 		break;
2157 
2158 	case BFA_FCPORT_SM_STOP:
2159 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2160 		break;
2161 
2162 	case BFA_FCPORT_SM_HWFAIL:
2163 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2164 		break;
2165 
2166 	case BFA_FCPORT_SM_FAA_MISCONFIG:
2167 		bfa_fcport_reset_linkinfo(fcport);
2168 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2169 		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2170 		break;
2171 
2172 	default:
2173 		bfa_sm_fault(fcport->bfa, event);
2174 	}
2175 }
2176 
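/*
 * Port is enabled; link is down.
 */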
2177 static void
2178 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2179 						enum bfa_fcport_sm_event event)
2180 {
2181 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2182 	char pwwn_buf[BFA_STRING_32];
2183 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2184 
2185 	bfa_trc(fcport->bfa, event);
2186 
2187 	switch (event) {
2188 	case BFA_FCPORT_SM_LINKUP:
2189 		bfa_fcport_update_linkinfo(fcport);
2190 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2191 		WARN_ON(!fcport->event_cbfn);
2192 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2193 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2194 		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2195 
2196 			bfa_trc(fcport->bfa,
2197 				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
2198 			bfa_trc(fcport->bfa,
2199 				pevent->link_state.attr.vc_fcf.fcf.fipfailed);
2200 
2201 			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
2202 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2203 					BFA_PL_EID_FIP_FCF_DISC, 0,
2204 					"FIP FCF Discovery Failed");
2205 			else
2206 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2207 					BFA_PL_EID_FIP_FCF_DISC, 0,
2208 					"FIP FCF Discovered");
2209 		}
2210 
2211 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2212 		wwn2str(pwwn_buf, fcport->pwwn);
2213 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2214 			"Base port online: WWN = %s\n", pwwn_buf);
2215 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
2216 
2217 		/* If QoS is enabled and it is not online, send AEN */
2218 		if (fcport->cfg.qos_enabled &&
2219 		    fcport->qos_attr.state != BFA_QOS_ONLINE)
2220 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
2221 		break;
2222 
2223 	case BFA_FCPORT_SM_LINKDOWN:
2224 		/*
2225 		 * Possible to get link down event.
2226 		 * Possible to get a link down event.
2227 		break;
2228 
2229 	case BFA_FCPORT_SM_ENABLE:
2230 		/*
2231 		 * Already enabled.
2232 		 */
2233 		break;
2234 
2235 	case BFA_FCPORT_SM_DISABLE:
2236 		if (bfa_fcport_send_disable(fcport))
2237 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2238 		else
2239 			bfa_sm_set_state(fcport,
2240 					 bfa_fcport_sm_disabling_qwait);
2241 
2242 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2243 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2244 		wwn2str(pwwn_buf, fcport->pwwn);
2245 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2246 			"Base port disabled: WWN = %s\n", pwwn_buf);
2247 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2248 		break;
2249 
2250 	case BFA_FCPORT_SM_STOP:
2251 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2252 		break;
2253 
2254 	case BFA_FCPORT_SM_HWFAIL:
2255 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2256 		break;
2257 
2258 	case BFA_FCPORT_SM_FAA_MISCONFIG:
2259 		bfa_fcport_reset_linkinfo(fcport);
2260 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2261 		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2262 		break;
2263 
2264 	default:
2265 		bfa_sm_fault(fcport->bfa, event);
2266 	}
2267 }
2268 
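/*
 * Port is enabled and the link is up.
 */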
2269 static void
2270 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2271 	enum bfa_fcport_sm_event event)
2272 {
2273 	char pwwn_buf[BFA_STRING_32];
2274 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2275 
2276 	bfa_trc(fcport->bfa, event);
2277 
2278 	switch (event) {
2279 	case BFA_FCPORT_SM_ENABLE:
2280 		/*
2281 		 * Already enabled.
2282 		 */
2283 		break;
2284 
2285 	case BFA_FCPORT_SM_DISABLE:
2286 		if (bfa_fcport_send_disable(fcport))
2287 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2288 		else
2289 			bfa_sm_set_state(fcport,
2290 					 bfa_fcport_sm_disabling_qwait);
2291 
2292 		bfa_fcport_reset_linkinfo(fcport);
2293 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2294 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2295 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2296 		wwn2str(pwwn_buf, fcport->pwwn);
2297 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2298 			"Base port offline: WWN = %s\n", pwwn_buf);
2299 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2300 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2301 			"Base port disabled: WWN = %s\n", pwwn_buf);
2302 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2303 		break;
2304 
2305 	case BFA_FCPORT_SM_LINKDOWN:
2306 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2307 		bfa_fcport_reset_linkinfo(fcport);
2308 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2309 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2310 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2311 		wwn2str(pwwn_buf, fcport->pwwn);
2312 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2313 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2314 				"Base port offline: WWN = %s\n", pwwn_buf);
2315 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2316 		} else {
2317 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2318 				"Base port (WWN = %s) "
2319 				"lost fabric connectivity\n", pwwn_buf);
2320 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2321 		}
2322 		break;
2323 
2324 	case BFA_FCPORT_SM_STOP:
2325 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2326 		bfa_fcport_reset_linkinfo(fcport);
2327 		wwn2str(pwwn_buf, fcport->pwwn);
2328 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2329 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2330 				"Base port offline: WWN = %s\n", pwwn_buf);
2331 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2332 		} else {
2333 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2334 				"Base port (WWN = %s) "
2335 				"lost fabric connectivity\n", pwwn_buf);
2336 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2337 		}
2338 		break;
2339 
2340 	case BFA_FCPORT_SM_HWFAIL:
2341 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2342 		bfa_fcport_reset_linkinfo(fcport);
2343 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2344 		wwn2str(pwwn_buf, fcport->pwwn);
2345 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2346 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2347 				"Base port offline: WWN = %s\n", pwwn_buf);
2348 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2349 		} else {
2350 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2351 				"Base port (WWN = %s) "
2352 				"lost fabric connectivity\n", pwwn_buf);
2353 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2354 		}
2355 		break;
2356 
2357 	case BFA_FCPORT_SM_FAA_MISCONFIG:
2358 		bfa_fcport_reset_linkinfo(fcport);
2359 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2360 		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2361 		break;
2362 
2363 	default:
2364 		bfa_sm_fault(fcport->bfa, event);
2365 	}
2366 }
2367 
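/*
 * Port disable request is waiting for room in the request queue.
 */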
2368 static void
2369 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2370 				 enum bfa_fcport_sm_event event)
2371 {
2372 	bfa_trc(fcport->bfa, event);
2373 
2374 	switch (event) {
2375 	case BFA_FCPORT_SM_QRESUME:
2376 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2377 		bfa_fcport_send_disable(fcport);
2378 		break;
2379 
2380 	case BFA_FCPORT_SM_STOP:
2381 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2382 		bfa_reqq_wcancel(&fcport->reqq_wait);
2383 		break;
2384 
2385 	case BFA_FCPORT_SM_ENABLE:
2386 		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2387 		break;
2388 
2389 	case BFA_FCPORT_SM_DISABLE:
2390 		/*
2391 		 * Already being disabled.
2392 		 */
2393 		break;
2394 
2395 	case BFA_FCPORT_SM_LINKUP:
2396 	case BFA_FCPORT_SM_LINKDOWN:
2397 		/*
2398 		 * Possible to get link events when doing back-to-back
2399 		 * enable/disables.
2400 		 */
2401 		break;
2402 
2403 	case BFA_FCPORT_SM_HWFAIL:
2404 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2405 		bfa_reqq_wcancel(&fcport->reqq_wait);
2406 		break;
2407 
2408 	case BFA_FCPORT_SM_FAA_MISCONFIG:
2409 		bfa_fcport_reset_linkinfo(fcport);
2410 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2411 		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2412 		break;
2413 
2414 	default:
2415 		bfa_sm_fault(fcport->bfa, event);
2416 	}
2417 }
2418 
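/*
 * A disable followed by an enable was requested while waiting for
 * request queue space; both messages are sent once room is available.
 */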
2419 static void
2420 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2421 				 enum bfa_fcport_sm_event event)
2422 {
2423 	bfa_trc(fcport->bfa, event);
2424 
2425 	switch (event) {
2426 	case BFA_FCPORT_SM_QRESUME:
2427 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2428 		bfa_fcport_send_disable(fcport);
2429 		if (bfa_fcport_send_enable(fcport))
2430 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2431 		else
2432 			bfa_sm_set_state(fcport,
2433 					 bfa_fcport_sm_enabling_qwait);
2434 		break;
2435 
2436 	case BFA_FCPORT_SM_STOP:
2437 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2438 		bfa_reqq_wcancel(&fcport->reqq_wait);
2439 		break;
2440 
2441 	case BFA_FCPORT_SM_ENABLE:
2442 		break;
2443 
2444 	case BFA_FCPORT_SM_DISABLE:
2445 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2446 		break;
2447 
2448 	case BFA_FCPORT_SM_LINKUP:
2449 	case BFA_FCPORT_SM_LINKDOWN:
2450 		/*
2451 		 * Possible to get link events when doing back-to-back
2452 		 * enable/disables.
2453 		 */
2454 		break;
2455 
2456 	case BFA_FCPORT_SM_HWFAIL:
2457 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2458 		bfa_reqq_wcancel(&fcport->reqq_wait);
2459 		break;
2460 
2461 	default:
2462 		bfa_sm_fault(fcport->bfa, event);
2463 	}
2464 }
2465 
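/*
 * Port disable request has been sent to the firmware; waiting for a
 * response.
 */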
2466 static void
2467 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2468 						enum bfa_fcport_sm_event event)
2469 {
2470 	char pwwn_buf[BFA_STRING_32];
2471 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2472 	bfa_trc(fcport->bfa, event);
2473 
2474 	switch (event) {
2475 	case BFA_FCPORT_SM_FWRSP:
2476 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2477 		break;
2478 
2479 	case BFA_FCPORT_SM_DISABLE:
2480 		/*
2481 		 * Already being disabled.
2482 		 */
2483 		break;
2484 
2485 	case BFA_FCPORT_SM_ENABLE:
2486 		if (bfa_fcport_send_enable(fcport))
2487 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2488 		else
2489 			bfa_sm_set_state(fcport,
2490 					 bfa_fcport_sm_enabling_qwait);
2491 
2492 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2493 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2494 		wwn2str(pwwn_buf, fcport->pwwn);
2495 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2496 			"Base port enabled: WWN = %s\n", pwwn_buf);
2497 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2498 		break;
2499 
2500 	case BFA_FCPORT_SM_STOP:
2501 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2502 		break;
2503 
2504 	case BFA_FCPORT_SM_LINKUP:
2505 	case BFA_FCPORT_SM_LINKDOWN:
2506 		/*
2507 		 * Possible to get link events when doing back-to-back
2508 		 * enable/disables.
2509 		 */
2510 		break;
2511 
2512 	case BFA_FCPORT_SM_HWFAIL:
2513 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2514 		break;
2515 
2516 	default:
2517 		bfa_sm_fault(fcport->bfa, event);
2518 	}
2519 }
2520 
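/*
 * Port is disabled; an enable request restarts it.
 */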
2521 static void
2522 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2523 						enum bfa_fcport_sm_event event)
2524 {
2525 	char pwwn_buf[BFA_STRING_32];
2526 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2527 	bfa_trc(fcport->bfa, event);
2528 
2529 	switch (event) {
2530 	case BFA_FCPORT_SM_START:
2531 		/*
2532 		 * Ignore start event for a port that is disabled.
2533 		 */
2534 		break;
2535 
2536 	case BFA_FCPORT_SM_STOP:
2537 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2538 		break;
2539 
2540 	case BFA_FCPORT_SM_ENABLE:
2541 		if (bfa_fcport_send_enable(fcport))
2542 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2543 		else
2544 			bfa_sm_set_state(fcport,
2545 					 bfa_fcport_sm_enabling_qwait);
2546 
2547 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2548 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2549 		wwn2str(pwwn_buf, fcport->pwwn);
2550 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2551 			"Base port enabled: WWN = %s\n", pwwn_buf);
2552 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2553 		break;
2554 
2555 	case BFA_FCPORT_SM_DISABLE:
2556 		/*
2557 		 * Already disabled.
2558 		 */
2559 		break;
2560 
2561 	case BFA_FCPORT_SM_HWFAIL:
2562 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2563 		break;
2564 
2565 	case BFA_FCPORT_SM_DPORTENABLE:
2566 		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
2567 		break;
2568 
2569 	case BFA_FCPORT_SM_DDPORTENABLE:
2570 		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
2571 		break;
2572 
2573 	default:
2574 		bfa_sm_fault(fcport->bfa, event);
2575 	}
2576 }
2577 
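/*
 * Port is stopped; a START event re-initiates port enable.
 */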
2578 static void
2579 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2580 			 enum bfa_fcport_sm_event event)
2581 {
2582 	bfa_trc(fcport->bfa, event);
2583 
2584 	switch (event) {
2585 	case BFA_FCPORT_SM_START:
2586 		if (bfa_fcport_send_enable(fcport))
2587 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2588 		else
2589 			bfa_sm_set_state(fcport,
2590 					 bfa_fcport_sm_enabling_qwait);
2591 		break;
2592 
2593 	default:
2594 		/*
2595 		 * Ignore all other events.
2596 		 */
2597 		;
2598 	}
2599 }
2600 
2601 /*
2602  * Port is enabled. IOC is down/failed.
2603  */
2604 static void
2605 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2606 			 enum bfa_fcport_sm_event event)
2607 {
2608 	bfa_trc(fcport->bfa, event);
2609 
2610 	switch (event) {
2611 	case BFA_FCPORT_SM_START:
2612 		if (bfa_fcport_send_enable(fcport))
2613 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2614 		else
2615 			bfa_sm_set_state(fcport,
2616 					 bfa_fcport_sm_enabling_qwait);
2617 		break;
2618 
2619 	default:
2620 		/*
2621 		 * Ignore all other events.
2622 		 */
2623 		;
2624 	}
2625 }
2626 
2627 /*
2628  * Port is disabled. IOC is down/failed.
2629  */
2630 static void
2631 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2632 			 enum bfa_fcport_sm_event event)
2633 {
2634 	bfa_trc(fcport->bfa, event);
2635 
2636 	switch (event) {
2637 	case BFA_FCPORT_SM_START:
2638 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2639 		break;
2640 
2641 	case BFA_FCPORT_SM_ENABLE:
2642 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2643 		break;
2644 
2645 	default:
2646 		/*
2647 		 * Ignore all other events.
2648 		 */
2649 		;
2650 	}
2651 }
2652 
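/*
 * Port is in diagnostic (D-Port) mode.
 */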
2653 static void
2654 bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
2655 {
2656 	bfa_trc(fcport->bfa, event);
2657 
2658 	switch (event) {
2659 	case BFA_FCPORT_SM_DPORTENABLE:
2660 	case BFA_FCPORT_SM_DISABLE:
2661 	case BFA_FCPORT_SM_ENABLE:
2662 	case BFA_FCPORT_SM_START:
2663 		/*
2664 		 * Ignore events for a port in dport mode
2665 		 */
2666 		break;
2667 
2668 	case BFA_FCPORT_SM_STOP:
2669 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2670 		break;
2671 
2672 	case BFA_FCPORT_SM_HWFAIL:
2673 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2674 		break;
2675 
2676 	case BFA_FCPORT_SM_DPORTDISABLE:
2677 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2678 		break;
2679 
2680 	default:
2681 		bfa_sm_fault(fcport->bfa, event);
2682 	}
2683 }
2684 
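/*
 * Port is in dynamic diagnostic (DD-Port) mode.
 */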
2685 static void
2686 bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
2687 			enum bfa_fcport_sm_event event)
2688 {
2689 	bfa_trc(fcport->bfa, event);
2690 
2691 	switch (event) {
2692 	case BFA_FCPORT_SM_DISABLE:
2693 	case BFA_FCPORT_SM_DDPORTDISABLE:
2694 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2695 		break;
2696 
2697 	case BFA_FCPORT_SM_DPORTENABLE:
2698 	case BFA_FCPORT_SM_DPORTDISABLE:
2699 	case BFA_FCPORT_SM_ENABLE:
2700 	case BFA_FCPORT_SM_START:
2701 		/*
2702 		 * Ignore events for a port in ddport mode
2703 		 */
2704 		break;
2705 
2706 	case BFA_FCPORT_SM_STOP:
2707 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2708 		break;
2709 
2710 	case BFA_FCPORT_SM_HWFAIL:
2711 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2712 		break;
2713 
2714 	default:
2715 		bfa_sm_fault(fcport->bfa, event);
2716 	}
2717 }
2718 
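/*
 * Port is held down due to an FAA (fabric-assigned address)
 * misconfiguration.
 */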
2719 static void
2720 bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
2721 			    enum bfa_fcport_sm_event event)
2722 {
2723 	bfa_trc(fcport->bfa, event);
2724 
2725 	switch (event) {
2726 	case BFA_FCPORT_SM_DPORTENABLE:
2727 	case BFA_FCPORT_SM_ENABLE:
2728 	case BFA_FCPORT_SM_START:
2729 		/*
2730 		 * Ignore event for a port as there is FAA misconfig
2731 		 * Ignore events while the port has an FAA misconfiguration
2732 		break;
2733 
2734 	case BFA_FCPORT_SM_DISABLE:
2735 		if (bfa_fcport_send_disable(fcport))
2736 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2737 		else
2738 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2739 
2740 		bfa_fcport_reset_linkinfo(fcport);
2741 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2742 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2743 			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2744 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2745 		break;
2746 
2747 	case BFA_FCPORT_SM_STOP:
2748 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2749 		break;
2750 
2751 	case BFA_FCPORT_SM_HWFAIL:
2752 		bfa_fcport_reset_linkinfo(fcport);
2753 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2754 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2755 		break;
2756 
2757 	default:
2758 		bfa_sm_fault(fcport->bfa, event);
2759 	}
2760 }
2761 
2762 /*
2763  * Link state is down
2764  */
2765 static void
2766 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2767 		enum bfa_fcport_ln_sm_event event)
2768 {
2769 	bfa_trc(ln->fcport->bfa, event);
2770 
2771 	switch (event) {
2772 	case BFA_FCPORT_LN_SM_LINKUP:
2773 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2774 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2775 		break;
2776 
2777 	default:
2778 		bfa_sm_fault(ln->fcport->bfa, event);
2779 	}
2780 }
2781 
2782 /*
2783  * Link state is waiting for down notification
2784  */
2785 static void
2786 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2787 		enum bfa_fcport_ln_sm_event event)
2788 {
2789 	bfa_trc(ln->fcport->bfa, event);
2790 
2791 	switch (event) {
2792 	case BFA_FCPORT_LN_SM_LINKUP:
2793 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2794 		break;
2795 
2796 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2797 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2798 		break;
2799 
2800 	default:
2801 		bfa_sm_fault(ln->fcport->bfa, event);
2802 	}
2803 }
2804 
2805 /*
2806  * Link state is waiting for down notification and there is a pending up
2807  */
2808 static void
2809 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2810 		enum bfa_fcport_ln_sm_event event)
2811 {
2812 	bfa_trc(ln->fcport->bfa, event);
2813 
2814 	switch (event) {
2815 	case BFA_FCPORT_LN_SM_LINKDOWN:
2816 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2817 		break;
2818 
2819 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2820 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2821 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2822 		break;
2823 
2824 	default:
2825 		bfa_sm_fault(ln->fcport->bfa, event);
2826 	}
2827 }
2828 
2829 /*
2830  * Link state is up
2831  */
2832 static void
2833 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2834 		enum bfa_fcport_ln_sm_event event)
2835 {
2836 	bfa_trc(ln->fcport->bfa, event);
2837 
2838 	switch (event) {
2839 	case BFA_FCPORT_LN_SM_LINKDOWN:
2840 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2841 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2842 		break;
2843 
2844 	default:
2845 		bfa_sm_fault(ln->fcport->bfa, event);
2846 	}
2847 }
2848 
2849 /*
2850  * Link state is waiting for up notification
2851  */
2852 static void
2853 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2854 		enum bfa_fcport_ln_sm_event event)
2855 {
2856 	bfa_trc(ln->fcport->bfa, event);
2857 
2858 	switch (event) {
2859 	case BFA_FCPORT_LN_SM_LINKDOWN:
2860 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2861 		break;
2862 
2863 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2864 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2865 		break;
2866 
2867 	default:
2868 		bfa_sm_fault(ln->fcport->bfa, event);
2869 	}
2870 }
2871 
2872 /*
2873  * Link state is waiting for up notification and there is a pending down
2874  */
2875 static void
2876 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2877 		enum bfa_fcport_ln_sm_event event)
2878 {
2879 	bfa_trc(ln->fcport->bfa, event);
2880 
2881 	switch (event) {
2882 	case BFA_FCPORT_LN_SM_LINKUP:
2883 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2884 		break;
2885 
2886 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2887 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2888 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2889 		break;
2890 
2891 	default:
2892 		bfa_sm_fault(ln->fcport->bfa, event);
2893 	}
2894 }
2895 
2896 /*
2897  * Link state is waiting for up notification and there are pending down and up
2898  */
2899 static void
2900 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2901 			enum bfa_fcport_ln_sm_event event)
2902 {
2903 	bfa_trc(ln->fcport->bfa, event);
2904 
2905 	switch (event) {
2906 	case BFA_FCPORT_LN_SM_LINKDOWN:
2907 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2908 		break;
2909 
2910 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2911 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2912 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2913 		break;
2914 
2915 	default:
2916 		bfa_sm_fault(ln->fcport->bfa, event);
2917 	}
2918 }
2919 
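/*
 * Queued completion: deliver the saved link event through the
 * registered callback, or notify the link state machine if the
 * callback was canceled.
 */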
2920 static void
2921 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2922 {
2923 	struct bfa_fcport_ln_s *ln = cbarg;
2924 
2925 	if (complete)
2926 		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2927 	else
2928 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2929 }
2930 
2931 /*
2932  * Send SCN notification to upper layers.
2933  * trunk - BFA_FALSE if caller is fcport; its events are ignored in trunked mode
2934  */
2935 static void
2936 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2937 	bfa_boolean_t trunk)
2938 {
2939 	if (fcport->cfg.trunked && !trunk)
2940 		return;
2941 
2942 	switch (event) {
2943 	case BFA_PORT_LINKUP:
2944 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2945 		break;
2946 	case BFA_PORT_LINKDOWN:
2947 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2948 		break;
2949 	default:
2950 		WARN_ON(1);
2951 	}
2952 }
2953 
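/*
 * Deliver a link state event: called directly for FCS, queued as a
 * callback for other consumers.
 */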
2954 static void
2955 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2956 {
2957 	struct bfa_fcport_s *fcport = ln->fcport;
2958 
2959 	if (fcport->bfa->fcs) {
2960 		fcport->event_cbfn(fcport->event_cbarg, event);
2961 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2962 	} else {
2963 		ln->ln_event = event;
2964 		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2965 			__bfa_cb_fcport_event, ln);
2966 	}
2967 }
2968 
2969 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2970 							BFA_CACHELINE_SZ))
2971 
2972 void
2973 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2974 		   struct bfa_s *bfa)
2975 {
2976 	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2977 
2978 	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
2979 }
2980 
2981 static void
2982 bfa_fcport_qresume(void *cbarg)
2983 {
2984 	struct bfa_fcport_s *fcport = cbarg;
2985 
2986 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2987 }
2988 
2989 static void
2990 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
2991 {
2992 	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
2993 
2994 	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2995 	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
2996 	fcport->stats = (union bfa_fcport_stats_u *)
2997 				bfa_mem_dma_virt(fcport_dma);
2998 }
2999 
3000 /*
3001  * Memory initialization.
3002  */
3003 void
3004 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3005 		struct bfa_pcidev_s *pcidev)
3006 {
3007 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3008 	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
3009 	struct bfa_fcport_ln_s *ln = &fcport->ln;
3010 
3011 	fcport->bfa = bfa;
3012 	ln->fcport = fcport;
3013 
3014 	bfa_fcport_mem_claim(fcport);
3015 
3016 	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
3017 	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
3018 
3019 	/*
3020 	 * initialize time stamp for stats reset
3021 	 */
3022 	fcport->stats_reset_time = ktime_get_seconds();
3023 	fcport->stats_dma_ready = BFA_FALSE;
3024 
3025 	/*
3026 	 * initialize and set default configuration
3027 	 */
3028 	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
3029 	port_cfg->speed = BFA_PORT_SPEED_AUTO;
3030 	port_cfg->trunked = BFA_FALSE;
3031 	port_cfg->maxfrsize = 0;
3032 
3033 	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3034 	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
3035 	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
3036 	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
3037 
3038 	fcport->fec_state = BFA_FEC_OFFLINE;
3039 
3040 	INIT_LIST_HEAD(&fcport->stats_pending_q);
3041 	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3042 
3043 	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
3044 }
3045 
3046 void
3047 bfa_fcport_start(struct bfa_s *bfa)
3048 {
3049 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
3050 }
3051 
3052 /*
3053  * Called when IOC failure is detected.
3054  */
3055 void
3056 bfa_fcport_iocdisable(struct bfa_s *bfa)
3057 {
3058 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3059 
3060 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
3061 	bfa_trunk_iocdisable(bfa);
3062 }
3063 
3064 /*
3065  * Update loop info in fcport for SCN online
3066  */
3067 static void
3068 bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3069 			struct bfa_fcport_loop_info_s *loop_info)
3070 {
3071 	fcport->myalpa = loop_info->myalpa;
3072 	fcport->alpabm_valid =
3073 			loop_info->alpabm_val;
3074 	memcpy(fcport->alpabm.alpa_bm,
3075 			loop_info->alpabm.alpa_bm,
3076 			sizeof(struct fc_alpabm_s));
3077 }
3078 
3079 static void
3080 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3081 {
3082 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
3083 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3084 
3085 	fcport->speed = pevent->link_state.speed;
3086 	fcport->topology = pevent->link_state.topology;
3087 
3088 	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
3089 		bfa_fcport_update_loop_info(fcport,
3090 				&pevent->link_state.attr.loop_info);
3091 		return;
3092 	}
3093 
3094 	/* QoS Details */
3095 	fcport->qos_attr = pevent->link_state.qos_attr;
3096 	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
3097 
3098 	if (fcport->cfg.bb_cr_enabled)
3099 		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;
3100 
3101 	fcport->fec_state = pevent->link_state.fec_state;
3102 
3103 	/*
3104 	 * update trunk state if applicable
3105 	 */
3106 	if (!fcport->cfg.trunked)
3107 		trunk->attr.state = BFA_TRUNK_DISABLED;
3108 
3109 	/* update FCoE-specific attributes */
3110 	fcport->fcoe_vlan =
3111 		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
3112 
3113 	bfa_trc(fcport->bfa, fcport->speed);
3114 	bfa_trc(fcport->bfa, fcport->topology);
3115 }
3116 
3117 static void
3118 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3119 {
3120 	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3121 	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3122 	fcport->fec_state = BFA_FEC_OFFLINE;
3123 }
3124 
3125 /*
3126  * Send port enable message to firmware.
3127  */
3128 static bfa_boolean_t
3129 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3130 {
3131 	struct bfi_fcport_enable_req_s *m;
3132 
3133 	/*
3134 	 * Increment message tag before queue check, so that responses to old
3135 	 * requests are discarded.
3136 	 */
3137 	fcport->msgtag++;
3138 
3139 	/*
3140 	 * check for room in queue to send request now
3141 	 */
3142 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3143 	if (!m) {
3144 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3145 							&fcport->reqq_wait);
3146 		return BFA_FALSE;
3147 	}
3148 
3149 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3150 			bfa_fn_lpu(fcport->bfa));
3151 	m->nwwn = fcport->nwwn;
3152 	m->pwwn = fcport->pwwn;
3153 	m->port_cfg = fcport->cfg;
3154 	m->msgtag = fcport->msgtag;
3155 	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3156 	m->use_flash_cfg = fcport->use_flash_cfg;
3157 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3158 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3159 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3160 
3161 	/*
3162 	 * queue I/O message to firmware
3163 	 */
3164 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3165 	return BFA_TRUE;
3166 }
3167 
3168 /*
3169  * Send port disable message to firmware.
3170  */
3171 static	bfa_boolean_t
3172 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3173 {
3174 	struct bfi_fcport_req_s *m;
3175 
3176 	/*
3177 	 * Increment message tag before queue check, so that responses to old
3178 	 * requests are discarded.
3179 	 */
3180 	fcport->msgtag++;
3181 
3182 	/*
3183 	 * check for room in queue to send request now
3184 	 */
3185 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3186 	if (!m) {
3187 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3188 							&fcport->reqq_wait);
3189 		return BFA_FALSE;
3190 	}
3191 
3192 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3193 			bfa_fn_lpu(fcport->bfa));
3194 	m->msgtag = fcport->msgtag;
3195 
3196 	/*
3197 	 * queue I/O message to firmware
3198 	 */
3199 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3200 
3201 	return BFA_TRUE;
3202 }
3203 
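/*
 * Initialize port world-wide names from the IOC attributes.
 */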
3204 static void
3205 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3206 {
3207 	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3208 	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3209 
3210 	bfa_trc(fcport->bfa, fcport->pwwn);
3211 	bfa_trc(fcport->bfa, fcport->nwwn);
3212 }
3213 
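/*
 * Byte-swap FC QoS statistics from firmware (big-endian) to host order.
 */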
3214 static void
3215 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3216 	struct bfa_qos_stats_s *s)
3217 {
3218 	u32	*dip = (u32 *) d;
3219 	__be32	*sip = (__be32 *) s;
3220 	int		i;
3221 
3222 	/* Now swap the 32 bit fields */
3223 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3224 		dip[i] = be32_to_cpu(sip[i]);
3225 }
3226 
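/*
 * Byte-swap FCoE statistics; the 64-bit counters also need their 32-bit
 * halves swapped on little-endian hosts.
 */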
3227 static void
3228 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3229 	struct bfa_fcoe_stats_s *s)
3230 {
3231 	u32	*dip = (u32 *) d;
3232 	__be32	*sip = (__be32 *) s;
3233 	int		i;
3234 
3235 	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3236 	     i = i + 2) {
3237 #ifdef __BIG_ENDIAN
3238 		dip[i] = be32_to_cpu(sip[i]);
3239 		dip[i + 1] = be32_to_cpu(sip[i + 1]);
3240 #else
3241 		dip[i] = be32_to_cpu(sip[i + 1]);
3242 		dip[i + 1] = be32_to_cpu(sip[i]);
3243 #endif
3244 	}
3245 }
3246 
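/*
 * Completion of a stats get request: copy the DMA'ed statistics (with
 * byte-swapping) to each pending caller's buffer and post its status
 * callback.
 */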
3247 static void
3248 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3249 {
3250 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3251 	struct bfa_cb_pending_q_s *cb;
3252 	struct list_head *qe, *qen;
3253 	union bfa_fcport_stats_u *ret;
3254 
3255 	if (complete) {
3256 		time64_t time = ktime_get_seconds();
3257 
3258 		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3259 			bfa_q_deq(&fcport->stats_pending_q, &qe);
3260 			cb = (struct bfa_cb_pending_q_s *)qe;
3261 			if (fcport->stats_status == BFA_STATUS_OK) {
3262 				ret = (union bfa_fcport_stats_u *)cb->data;
3263 				/* Swap FC QoS or FCoE stats */
3264 				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3265 					bfa_fcport_qos_stats_swap(&ret->fcqos,
3266 							&fcport->stats->fcqos);
3267 				else {
3268 					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3269 							&fcport->stats->fcoe);
3270 					ret->fcoe.secs_reset =
3271 						time - fcport->stats_reset_time;
3272 				}
3273 			}
3274 			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3275 					fcport->stats_status);
3276 		}
3277 		fcport->stats_status = BFA_STATUS_OK;
3278 	} else {
3279 		INIT_LIST_HEAD(&fcport->stats_pending_q);
3280 		fcport->stats_status = BFA_STATUS_OK;
3281 	}
3282 }
3283 
3284 static void
3285 bfa_fcport_stats_get_timeout(void *cbarg)
3286 {
3287 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3288 
3289 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3290 
3291 	if (fcport->stats_qfull) {
3292 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3293 		fcport->stats_qfull = BFA_FALSE;
3294 	}
3295 
3296 	fcport->stats_status = BFA_STATUS_ETIMER;
3297 	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3298 }
3299 
3300 static void
3301 bfa_fcport_send_stats_get(void *cbarg)
3302 {
3303 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3304 	struct bfi_fcport_req_s *msg;
3305 
3306 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3307 
3308 	if (!msg) {
3309 		fcport->stats_qfull = BFA_TRUE;
3310 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3311 				bfa_fcport_send_stats_get, fcport);
3312 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3313 				&fcport->stats_reqq_wait);
3314 		return;
3315 	}
3316 	fcport->stats_qfull = BFA_FALSE;
3317 
3318 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3319 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3320 			bfa_fn_lpu(fcport->bfa));
3321 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3322 }
3323 
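/*
 * Completion of a stats clear request: reset the stats time stamp and
 * post the status callback for each pending caller.
 */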
3324 static void
3325 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3326 {
3327 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3328 	struct bfa_cb_pending_q_s *cb;
3329 	struct list_head *qe, *qen;
3330 
3331 	if (complete) {
3332 		/*
3333 		 * re-initialize time stamp for stats reset
3334 		 */
3335 		fcport->stats_reset_time = ktime_get_seconds();
3336 		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3337 			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3338 			cb = (struct bfa_cb_pending_q_s *)qe;
3339 			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3340 						fcport->stats_status);
3341 		}
3342 		fcport->stats_status = BFA_STATUS_OK;
3343 	} else {
3344 		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3345 		fcport->stats_status = BFA_STATUS_OK;
3346 	}
3347 }
3348 
3349 static void
3350 bfa_fcport_stats_clr_timeout(void *cbarg)
3351 {
3352 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3353 
3354 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3355 
3356 	if (fcport->stats_qfull) {
3357 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3358 		fcport->stats_qfull = BFA_FALSE;
3359 	}
3360 
3361 	fcport->stats_status = BFA_STATUS_ETIMER;
3362 	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3363 }
3364 
3365 static void
3366 bfa_fcport_send_stats_clear(void *cbarg)
3367 {
3368 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3369 	struct bfi_fcport_req_s *msg;
3370 
3371 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3372 
3373 	if (!msg) {
3374 		fcport->stats_qfull = BFA_TRUE;
3375 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3376 				bfa_fcport_send_stats_clear, fcport);
3377 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3378 						&fcport->stats_reqq_wait);
3379 		return;
3380 	}
3381 	fcport->stats_qfull = BFA_FALSE;
3382 
3383 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3384 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3385 			bfa_fn_lpu(fcport->bfa));
3386 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3387 }
3388 
3389 /*
3390  * Handle trunk SCN event from firmware.
3391  */
3392 static void
3393 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3394 {
3395 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3396 	struct bfi_fcport_trunk_link_s *tlink;
3397 	struct bfa_trunk_link_attr_s *lattr;
3398 	enum bfa_trunk_state state_prev;
3399 	int i;
3400 	int link_bm = 0;
3401 
3402 	bfa_trc(fcport->bfa, fcport->cfg.trunked);
3403 	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3404 		   scn->trunk_state != BFA_TRUNK_OFFLINE);
3405 
3406 	bfa_trc(fcport->bfa, trunk->attr.state);
3407 	bfa_trc(fcport->bfa, scn->trunk_state);
3408 	bfa_trc(fcport->bfa, scn->trunk_speed);
3409 
3410 	/*
3411 	 * Save off new state for trunk attribute query
3412 	 */
3413 	state_prev = trunk->attr.state;
3414 	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3415 		trunk->attr.state = scn->trunk_state;
3416 	trunk->attr.speed = scn->trunk_speed;
3417 	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3418 		lattr = &trunk->attr.link_attr[i];
3419 		tlink = &scn->tlink[i];
3420 
3421 		lattr->link_state = tlink->state;
3422 		lattr->trunk_wwn  = tlink->trunk_wwn;
3423 		lattr->fctl	  = tlink->fctl;
3424 		lattr->speed	  = tlink->speed;
3425 		lattr->deskew	  = be32_to_cpu(tlink->deskew);
3426 
3427 		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3428 			fcport->speed	 = tlink->speed;
3429 			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3430 			link_bm |= 1 << i;
3431 		}
3432 
3433 		bfa_trc(fcport->bfa, lattr->link_state);
3434 		bfa_trc(fcport->bfa, lattr->trunk_wwn);
3435 		bfa_trc(fcport->bfa, lattr->fctl);
3436 		bfa_trc(fcport->bfa, lattr->speed);
3437 		bfa_trc(fcport->bfa, lattr->deskew);
3438 	}
3439 
3440 	switch (link_bm) {
3441 	case 3:
3442 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3443 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3444 		break;
3445 	case 2:
3446 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3447 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3448 		break;
3449 	case 1:
3450 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3451 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3452 		break;
3453 	default:
3454 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3455 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3456 	}
3457 
3458 	/*
3459 	 * Notify upper layers if trunk state changed.
3460 	 */
3461 	if ((state_prev != trunk->attr.state) ||
3462 		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3463 		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3464 			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3465 	}
3466 }
3467 
3468 static void
3469 bfa_trunk_iocdisable(struct bfa_s *bfa)
3470 {
3471 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3472 	int i = 0;
3473 
3474 	/*
3475 	 * In trunked mode, notify upper layers that link is down
3476 	 */
3477 	if (fcport->cfg.trunked) {
3478 		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3479 			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3480 
3481 		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3482 		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3483 		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3484 			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3485 			fcport->trunk.attr.link_attr[i].fctl =
3486 						BFA_TRUNK_LINK_FCTL_NORMAL;
3487 			fcport->trunk.attr.link_attr[i].link_state =
3488 						BFA_TRUNK_LINK_STATE_DN_LINKDN;
3489 			fcport->trunk.attr.link_attr[i].speed =
3490 						BFA_PORT_SPEED_UNKNOWN;
3491 			fcport->trunk.attr.link_attr[i].deskew = 0;
3492 		}
3493 	}
3494 }
3495 
3496 /*
3497  * Called to initialize port attributes
3498  */
3499 void
3500 bfa_fcport_init(struct bfa_s *bfa)
3501 {
3502 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3503 
3504 	/*
3505 	 * Initialize port attributes from IOC hardware data.
3506 	 */
3507 	bfa_fcport_set_wwns(fcport);
3508 	if (fcport->cfg.maxfrsize == 0)
3509 		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3510 	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3511 	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3512 
3513 	if (bfa_fcport_is_pbcdisabled(bfa))
3514 		bfa->modules.port.pbc_disabled = BFA_TRUE;
3515 
3516 	WARN_ON(!fcport->cfg.maxfrsize);
3517 	WARN_ON(!fcport->cfg.rx_bbcredit);
3518 	WARN_ON(!fcport->speed_sup);
3519 }
3520 
3521 /*
3522  * Firmware message handler.
3523  */
3524 void
3525 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3526 {
3527 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3528 	union bfi_fcport_i2h_msg_u i2hmsg;
3529 
3530 	i2hmsg.msg = msg;
3531 	fcport->event_arg.i2hmsg = i2hmsg;
3532 
3533 	bfa_trc(bfa, msg->mhdr.msg_id);
3534 	bfa_trc(bfa, bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm));
3535 
3536 	switch (msg->mhdr.msg_id) {
3537 	case BFI_FCPORT_I2H_ENABLE_RSP:
3538 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3539 
3540 			fcport->stats_dma_ready = BFA_TRUE;
3541 			if (fcport->use_flash_cfg) {
3542 				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3543 				fcport->cfg.maxfrsize =
3544 					cpu_to_be16(fcport->cfg.maxfrsize);
3545 				fcport->cfg.path_tov =
3546 					cpu_to_be16(fcport->cfg.path_tov);
3547 				fcport->cfg.q_depth =
3548 					cpu_to_be16(fcport->cfg.q_depth);
3549 
3550 				if (fcport->cfg.trunked)
3551 					fcport->trunk.attr.state =
3552 						BFA_TRUNK_OFFLINE;
3553 				else
3554 					fcport->trunk.attr.state =
3555 						BFA_TRUNK_DISABLED;
3556 				fcport->qos_attr.qos_bw =
3557 					i2hmsg.penable_rsp->port_cfg.qos_bw;
3558 				fcport->use_flash_cfg = BFA_FALSE;
3559 			}
3560 
3561 			if (fcport->cfg.qos_enabled)
3562 				fcport->qos_attr.state = BFA_QOS_OFFLINE;
3563 			else
3564 				fcport->qos_attr.state = BFA_QOS_DISABLED;
3565 
3566 			fcport->qos_attr.qos_bw_op =
3567 					i2hmsg.penable_rsp->port_cfg.qos_bw;
3568 
3569 			if (fcport->cfg.bb_cr_enabled)
3570 				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3571 			else
3572 				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3573 
3574 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3575 		}
3576 		break;
3577 
3578 	case BFI_FCPORT_I2H_DISABLE_RSP:
3579 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3580 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3581 		break;
3582 
3583 	case BFI_FCPORT_I2H_EVENT:
3584 		if (fcport->cfg.bb_cr_enabled)
3585 			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3586 		else
3587 			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3588 
3589 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3590 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3591 		else {
3592 			if (i2hmsg.event->link_state.linkstate_rsn ==
3593 			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3594 				bfa_sm_send_event(fcport,
3595 						  BFA_FCPORT_SM_FAA_MISCONFIG);
3596 			else
3597 				bfa_sm_send_event(fcport,
3598 						  BFA_FCPORT_SM_LINKDOWN);
3599 		}
3600 		fcport->qos_attr.qos_bw_op =
3601 				i2hmsg.event->link_state.qos_attr.qos_bw_op;
3602 		break;
3603 
3604 	case BFI_FCPORT_I2H_TRUNK_SCN:
3605 		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3606 		break;
3607 
3608 	case BFI_FCPORT_I2H_STATS_GET_RSP:
3609 		/*
3610 		 * check for timer pop before processing the rsp
3611 		 */
3612 		if (list_empty(&fcport->stats_pending_q) ||
3613 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3614 			break;
3615 
3616 		bfa_timer_stop(&fcport->timer);
3617 		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3618 		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3619 		break;
3620 
3621 	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3622 		/*
3623 		 * check for timer pop before processing the rsp
3624 		 */
3625 		if (list_empty(&fcport->statsclr_pending_q) ||
3626 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3627 			break;
3628 
3629 		bfa_timer_stop(&fcport->timer);
3630 		fcport->stats_status = BFA_STATUS_OK;
3631 		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3632 		break;
3633 
3634 	case BFI_FCPORT_I2H_ENABLE_AEN:
3635 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3636 		break;
3637 
3638 	case BFI_FCPORT_I2H_DISABLE_AEN:
3639 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3640 		break;
3641 
3642 	default:
3643 		WARN_ON(1);
3644 		break;
3645 	}
3646 }
3647 
3648 /*
3649  * Registered callback for port events.
3650  */
3651 void
3652 bfa_fcport_event_register(struct bfa_s *bfa,
3653 				void (*cbfn) (void *cbarg,
3654 				enum bfa_port_linkstate event),
3655 				void *cbarg)
3656 {
3657 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3658 
3659 	fcport->event_cbfn = cbfn;
3660 	fcport->event_cbarg = cbarg;
3661 }
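
/*
 * Illustrative registration sketch (my_linkstate_cb and my_drv are
 * hypothetical caller-side names):
 *
 *	bfa_fcport_event_register(bfa, my_linkstate_cb, my_drv);
 *
 * my_linkstate_cb() is subsequently invoked with BFA_PORT_LINKUP or
 * BFA_PORT_LINKDOWN on link state changes.
 */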
3662 
3663 bfa_status_t
3664 bfa_fcport_enable(struct bfa_s *bfa)
3665 {
3666 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3667 
3668 	if (bfa_fcport_is_pbcdisabled(bfa))
3669 		return BFA_STATUS_PBC;
3670 
3671 	if (bfa_ioc_is_disabled(&bfa->ioc))
3672 		return BFA_STATUS_IOC_DISABLED;
3673 
3674 	if (fcport->diag_busy)
3675 		return BFA_STATUS_DIAG_BUSY;
3676 
3677 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3678 	return BFA_STATUS_OK;
3679 }
3680 
3681 bfa_status_t
3682 bfa_fcport_disable(struct bfa_s *bfa)
3683 {
3684 	if (bfa_fcport_is_pbcdisabled(bfa))
3685 		return BFA_STATUS_PBC;
3686 
3687 	if (bfa_ioc_is_disabled(&bfa->ioc))
3688 		return BFA_STATUS_IOC_DISABLED;
3689 
3690 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3691 	return BFA_STATUS_OK;
3692 }
3693 
3694 /* If PBC is disabled on port, return error */
3695 bfa_status_t
3696 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3697 {
3698 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3699 	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3700 	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3701 
3702 	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3703 		bfa_trc(bfa, fcport->pwwn);
3704 		return BFA_STATUS_PBC;
3705 	}
3706 	return BFA_STATUS_OK;
3707 }
3708 
3709 /*
3710  * Configure port speed.
3711  */
3712 bfa_status_t
3713 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3714 {
3715 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3716 
3717 	bfa_trc(bfa, speed);
3718 
3719 	if (fcport->cfg.trunked == BFA_TRUE)
3720 		return BFA_STATUS_TRUNK_ENABLED;
3721 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3722 			(speed == BFA_PORT_SPEED_16GBPS))
3723 		return BFA_STATUS_UNSUPP_SPEED;
3724 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3725 		bfa_trc(bfa, fcport->speed_sup);
3726 		return BFA_STATUS_UNSUPP_SPEED;
3727 	}
3728 
3729 	/* Validate the requested port speed */
3730 	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3731 		/* For CT2, 1G is not supported */
3732 		if ((speed == BFA_PORT_SPEED_1GBPS) &&
3733 		    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3734 			return BFA_STATUS_UNSUPP_SPEED;
3735 
3736 		/* Already checked for Auto Speed and Max Speed supp */
3737 		if (!(speed == BFA_PORT_SPEED_1GBPS ||
3738 		      speed == BFA_PORT_SPEED_2GBPS ||
3739 		      speed == BFA_PORT_SPEED_4GBPS ||
3740 		      speed == BFA_PORT_SPEED_8GBPS ||
3741 		      speed == BFA_PORT_SPEED_16GBPS ||
3742 		      speed == BFA_PORT_SPEED_AUTO))
3743 			return BFA_STATUS_UNSUPP_SPEED;
3744 	} else {
3745 		if (speed != BFA_PORT_SPEED_10GBPS)
3746 			return BFA_STATUS_UNSUPP_SPEED;
3747 	}
3748 
3749 	fcport->cfg.speed = speed;
3750 
3751 	return BFA_STATUS_OK;
3752 }
3753 
3754 /*
3755  * Get current speed.
3756  */
3757 enum bfa_port_speed
3758 bfa_fcport_get_speed(struct bfa_s *bfa)
3759 {
3760 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3761 
3762 	return fcport->speed;
3763 }
3764 
3765 /*
3766  * Configure port topology.
3767  */
3768 bfa_status_t
3769 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3770 {
3771 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3772 
3773 	bfa_trc(bfa, topology);
3774 	bfa_trc(bfa, fcport->cfg.topology);
3775 
3776 	switch (topology) {
3777 	case BFA_PORT_TOPOLOGY_P2P:
3778 		break;
3779 
3780 	case BFA_PORT_TOPOLOGY_LOOP:
3781 		if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3782 			(fcport->qos_attr.state != BFA_QOS_DISABLED))
3783 			return BFA_STATUS_ERROR_QOS_ENABLED;
3784 		if (fcport->cfg.ratelimit != BFA_FALSE)
3785 			return BFA_STATUS_ERROR_TRL_ENABLED;
3786 		if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3787 			(fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3788 			return BFA_STATUS_ERROR_TRUNK_ENABLED;
3789 		if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3790 			(fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3791 			return BFA_STATUS_UNSUPP_SPEED;
3792 		if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3793 			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3794 		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3795 			return BFA_STATUS_DPORT_ERR;
3796 		if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3797 			return BFA_STATUS_DPORT_ERR;
3798 		break;
3799 
3800 	case BFA_PORT_TOPOLOGY_AUTO:
3801 		break;
3802 
3803 	default:
3804 		return BFA_STATUS_EINVAL;
3805 	}
3806 
3807 	fcport->cfg.topology = topology;
3808 	return BFA_STATUS_OK;
3809 }
3810 
3811 /*
3812  * Get current topology.
3813  */
3814 enum bfa_port_topology
3815 bfa_fcport_get_topology(struct bfa_s *bfa)
3816 {
3817 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3818 
3819 	return fcport->topology;
3820 }
3821 
3822 /*
3823  * Get config topology.
3824  */
3825 enum bfa_port_topology
3826 bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3827 {
3828 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3829 
3830 	return fcport->cfg.topology;
3831 }
3832 
3833 bfa_status_t
3834 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3835 {
3836 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3837 
3838 	bfa_trc(bfa, alpa);
3839 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3840 	bfa_trc(bfa, fcport->cfg.hardalpa);
3841 
3842 	fcport->cfg.cfg_hardalpa = BFA_TRUE;
3843 	fcport->cfg.hardalpa = alpa;
3844 
3845 	return BFA_STATUS_OK;
3846 }
3847 
3848 bfa_status_t
3849 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3850 {
3851 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3852 
3853 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3854 	bfa_trc(bfa, fcport->cfg.hardalpa);
3855 
3856 	fcport->cfg.cfg_hardalpa = BFA_FALSE;
3857 	return BFA_STATUS_OK;
3858 }
3859 
3860 bfa_boolean_t
3861 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3862 {
3863 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3864 
3865 	*alpa = fcport->cfg.hardalpa;
3866 	return fcport->cfg.cfg_hardalpa;
3867 }
3868 
3869 u8
3870 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3871 {
3872 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3873 
3874 	return fcport->myalpa;
3875 }
3876 
3877 bfa_status_t
3878 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3879 {
3880 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3881 
3882 	bfa_trc(bfa, maxfrsize);
3883 	bfa_trc(bfa, fcport->cfg.maxfrsize);
3884 
3885 	/* must be within the valid PDU size range */
3886 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3887 		return BFA_STATUS_INVLD_DFSZ;
3888 
3889 	/* must be a power of 2, unless it is the max frame size of 2112 */
3890 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3891 		return BFA_STATUS_INVLD_DFSZ;
3892 
3893 	fcport->cfg.maxfrsize = maxfrsize;
3894 	return BFA_STATUS_OK;
3895 }
3896 
3897 u16
3898 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3899 {
3900 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3901 
3902 	return fcport->cfg.maxfrsize;
3903 }
3904 
3905 u8
3906 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3907 {
3908 	if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3909 		return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3911 	else
3912 		return 0;
3913 }
3914 
3915 void
3916 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3917 {
3918 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3919 
3920 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3921 }
3922 
3923 /*
3924  * Get port attributes.
3925  */
3926 
3927 wwn_t
3928 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3929 {
3930 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3931 	if (node)
3932 		return fcport->nwwn;
3933 	else
3934 		return fcport->pwwn;
3935 }
3936 
3937 void
3938 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3939 {
3940 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3941 
3942 	memset(attr, 0, sizeof(struct bfa_port_attr_s));
3943 
3944 	attr->nwwn = fcport->nwwn;
3945 	attr->pwwn = fcport->pwwn;
3946 
3947 	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
3948 	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;
3949 
3950 	memcpy(&attr->pport_cfg, &fcport->cfg,
3951 		sizeof(struct bfa_port_cfg_s));
3952 	/* speed attributes */
3953 	attr->pport_cfg.speed = fcport->cfg.speed;
3954 	attr->speed_supported = fcport->speed_sup;
3955 	attr->speed = fcport->speed;
3956 	attr->cos_supported = FC_CLASS_3;
3957 
3958 	/* topology attributes */
3959 	attr->pport_cfg.topology = fcport->cfg.topology;
3960 	attr->topology = fcport->topology;
3961 	attr->pport_cfg.trunked = fcport->cfg.trunked;
3962 
3963 	/* beacon attributes */
3964 	attr->beacon = fcport->beacon;
3965 	attr->link_e2e_beacon = fcport->link_e2e_beacon;
3966 
3967 	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
3968 	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
3969 	attr->port_state = bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm);
3970 
3971 	attr->fec_state = fcport->fec_state;
3972 
3973 	/* PBC Disabled State */
3974 	if (bfa_fcport_is_pbcdisabled(bfa))
3975 		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3976 	else {
3977 		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3978 			attr->port_state = BFA_PORT_ST_IOCDIS;
3979 		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3980 			attr->port_state = BFA_PORT_ST_FWMISMATCH;
3981 	}
3982 
3983 	/* FCoE vlan */
3984 	attr->fcoe_vlan = fcport->fcoe_vlan;
3985 }
3986 
3987 #define BFA_FCPORT_STATS_TOV	1000
3988 
3989 /*
3990  * Fetch port statistics (FCQoS or FCoE).
3991  */
3992 bfa_status_t
3993 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3994 {
3995 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3996 
3997 	if (!bfa_iocfc_is_operational(bfa) ||
3998 	    !fcport->stats_dma_ready)
3999 		return BFA_STATUS_IOC_NON_OP;
4000 
4001 	if (!list_empty(&fcport->statsclr_pending_q))
4002 		return BFA_STATUS_DEVBUSY;
4003 
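	/*
	 * The first request kicks off the firmware stats fetch and a
	 * timeout guard; later requests simply queue behind it.
	 */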
4004 	if (list_empty(&fcport->stats_pending_q)) {
4005 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4006 		bfa_fcport_send_stats_get(fcport);
4007 		bfa_timer_start(bfa, &fcport->timer,
4008 				bfa_fcport_stats_get_timeout,
4009 				fcport, BFA_FCPORT_STATS_TOV);
4010 	} else
4011 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4012 
4013 	return BFA_STATUS_OK;
4014 }
4015 
4016 /*
4017  * Reset port statistics (FCQoS or FCoE).
4018  */
4019 bfa_status_t
4020 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4021 {
4022 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4023 
4024 	if (!bfa_iocfc_is_operational(bfa) ||
4025 	    !fcport->stats_dma_ready)
4026 		return BFA_STATUS_IOC_NON_OP;
4027 
4028 	if (!list_empty(&fcport->stats_pending_q))
4029 		return BFA_STATUS_DEVBUSY;
4030 
4031 	if (list_empty(&fcport->statsclr_pending_q)) {
4032 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4033 		bfa_fcport_send_stats_clear(fcport);
4034 		bfa_timer_start(bfa, &fcport->timer,
4035 				bfa_fcport_stats_clr_timeout,
4036 				fcport, BFA_FCPORT_STATS_TOV);
4037 	} else
4038 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4039 
4040 	return BFA_STATUS_OK;
4041 }
4042 
4043 /*
4044  * Check whether the port is disabled.
4045  */
4046 bfa_boolean_t
4047 bfa_fcport_is_disabled(struct bfa_s *bfa)
4048 {
4049 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4050 
4051 	return bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
4052 		BFA_PORT_ST_DISABLED;
4054 }
4055 
4056 bfa_boolean_t
4057 bfa_fcport_is_dport(struct bfa_s *bfa)
4058 {
4059 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4060 
4061 	return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
4062 		BFA_PORT_ST_DPORT);
4063 }
4064 
4065 bfa_boolean_t
4066 bfa_fcport_is_ddport(struct bfa_s *bfa)
4067 {
4068 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4069 
4070 	return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
4071 		BFA_PORT_ST_DDPORT);
4072 }
4073 
4074 bfa_status_t
4075 bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4076 {
4077 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4078 	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4079 
4080 	bfa_trc(bfa, ioc_type);
4081 
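	/*
	 * QoS bandwidth percentages must all be non-zero, sum to 100,
	 * and satisfy high >= med >= low.
	 */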
4082 	if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4083 		return BFA_STATUS_QOS_BW_INVALID;
4084 
4085 	if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4086 		return BFA_STATUS_QOS_BW_INVALID;
4087 
4088 	if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4089 	    (qos_bw->low > qos_bw->high))
4090 		return BFA_STATUS_QOS_BW_INVALID;
4091 
4092 	if ((ioc_type == BFA_IOC_TYPE_FC) &&
4093 	    (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4094 		fcport->cfg.qos_bw = *qos_bw;
4095 
4096 	return BFA_STATUS_OK;
4097 }
4098 
4099 bfa_boolean_t
4100 bfa_fcport_is_ratelim(struct bfa_s *bfa)
4101 {
4102 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4103 
4104 	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4106 }
4107 
4108 /*
4109  *	Enable/Disable FAA feature in port config
4110  */
4111 void
4112 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4113 {
4114 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4115 
4116 	bfa_trc(bfa, state);
4117 	fcport->cfg.faa_state = state;
4118 }
4119 
4120 /*
4121  * Get default minimum ratelim speed
4122  */
4123 enum bfa_port_speed
4124 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4125 {
4126 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4127 
4128 	bfa_trc(bfa, fcport->cfg.trl_def_speed);
4129 	return fcport->cfg.trl_def_speed;
4131 }
4132 
4133 void
4134 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4135 		  bfa_boolean_t link_e2e_beacon)
4136 {
4137 	struct bfa_s *bfa = dev;
4138 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4139 
4140 	bfa_trc(bfa, beacon);
4141 	bfa_trc(bfa, link_e2e_beacon);
4142 	bfa_trc(bfa, fcport->beacon);
4143 	bfa_trc(bfa, fcport->link_e2e_beacon);
4144 
4145 	fcport->beacon = beacon;
4146 	fcport->link_e2e_beacon = link_e2e_beacon;
4147 }
4148 
4149 bfa_boolean_t
4150 bfa_fcport_is_linkup(struct bfa_s *bfa)
4151 {
4152 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4153 
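	/*
	 * Link is up if a non-trunked port is in the linkup state, or if
	 * the port is trunked and the trunk is online.
	 */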
4154 	return	(!fcport->cfg.trunked &&
4155 		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4156 		(fcport->cfg.trunked &&
4157 		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4158 }
4159 
4160 bfa_boolean_t
4161 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4162 {
4163 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4164 
4165 	return fcport->cfg.qos_enabled;
4166 }
4167 
4168 bfa_boolean_t
4169 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4170 {
4171 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4172 
4173 	return fcport->cfg.trunked;
4174 }
4175 
4176 bfa_status_t
4177 bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
4178 {
4179 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4180 
4181 	bfa_trc(bfa, on_off);
4182 
4183 	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4184 		return BFA_STATUS_BBCR_FC_ONLY;
4185 
4186 	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
4187 		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
4188 		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
4189 
4190 	if (on_off) {
4191 		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4192 			return BFA_STATUS_TOPOLOGY_LOOP;
4193 
4194 		if (fcport->cfg.qos_enabled)
4195 			return BFA_STATUS_ERROR_QOS_ENABLED;
4196 
4197 		if (fcport->cfg.trunked)
4198 			return BFA_STATUS_TRUNK_ENABLED;
4199 
4200 		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
4201 			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
4202 			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;
4203 
4204 		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
4205 			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
4206 
4207 		if (fcport->cfg.bb_cr_enabled) {
4208 			if (bb_scn != fcport->cfg.bb_scn)
4209 				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
4210 			else
4211 				return BFA_STATUS_NO_CHANGE;
4212 		}
4213 
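		/* Fall back to the default bb_scn if the requested value is out of range */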
4214 		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
4215 			bb_scn = BFA_BB_SCN_DEF;
4216 
4217 		fcport->cfg.bb_cr_enabled = on_off;
4218 		fcport->cfg.bb_scn = bb_scn;
4219 	} else {
4220 		if (!fcport->cfg.bb_cr_enabled)
4221 			return BFA_STATUS_NO_CHANGE;
4222 
4223 		fcport->cfg.bb_cr_enabled = on_off;
4224 		fcport->cfg.bb_scn = 0;
4225 	}
4226 
4227 	return BFA_STATUS_OK;
4228 }
4229 
4230 bfa_status_t
4231 bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4232 		struct bfa_bbcr_attr_s *bbcr_attr)
4233 {
4234 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4235 
4236 	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4237 		return BFA_STATUS_BBCR_FC_ONLY;
4238 
4239 	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4240 		return BFA_STATUS_TOPOLOGY_LOOP;
4241 
4242 	*bbcr_attr = fcport->bbcr_attr;
4243 
4244 	return BFA_STATUS_OK;
4245 }
4246 
4247 void
4248 bfa_fcport_dportenable(struct bfa_s *bfa)
4249 {
4250 	/*
4251 	 * Assumes the caller has checked that the port is in the disabled state
4252 	 */
4253 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4254 	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4255 }
4256 
4257 void
4258 bfa_fcport_dportdisable(struct bfa_s *bfa)
4259 {
4260 	/*
4261 	 * Assumes the caller has checked that the port is in the disabled state
4262 	 */
4263 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4264 	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4265 }
4266 
4267 static void
4268 bfa_fcport_ddportenable(struct bfa_s *bfa)
4269 {
4270 	/*
4271 	 * Assumes the caller has checked that the port is in the disabled state
4272 	 */
4273 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
4274 }
4275 
4276 static void
4277 bfa_fcport_ddportdisable(struct bfa_s *bfa)
4278 {
4279 	/*
4280 	 * Assumes the caller has checked that the port is in the disabled state
4281 	 */
4282 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
4283 }
4284 
4285 /*
4286  * Rport State machine functions
4287  */
4288 /*
4289  * Beginning state, only online event expected.
4290  */
4291 static void
4292 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4293 {
4294 	bfa_trc(rp->bfa, rp->rport_tag);
4295 	bfa_trc(rp->bfa, event);
4296 
4297 	switch (event) {
4298 	case BFA_RPORT_SM_CREATE:
4299 		bfa_stats(rp, sm_un_cr);
4300 		bfa_sm_set_state(rp, bfa_rport_sm_created);
4301 		break;
4302 
4303 	default:
4304 		bfa_stats(rp, sm_un_unexp);
4305 		bfa_sm_fault(rp->bfa, event);
4306 	}
4307 }
4308 
4309 static void
4310 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4311 {
4312 	bfa_trc(rp->bfa, rp->rport_tag);
4313 	bfa_trc(rp->bfa, event);
4314 
4315 	switch (event) {
4316 	case BFA_RPORT_SM_ONLINE:
4317 		bfa_stats(rp, sm_cr_on);
4318 		if (bfa_rport_send_fwcreate(rp))
4319 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4320 		else
4321 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4322 		break;
4323 
4324 	case BFA_RPORT_SM_DELETE:
4325 		bfa_stats(rp, sm_cr_del);
4326 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4327 		bfa_rport_free(rp);
4328 		break;
4329 
4330 	case BFA_RPORT_SM_HWFAIL:
4331 		bfa_stats(rp, sm_cr_hwf);
4332 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4333 		break;
4334 
4335 	default:
4336 		bfa_stats(rp, sm_cr_unexp);
4337 		bfa_sm_fault(rp->bfa, event);
4338 	}
4339 }
4340 
4341 /*
4342  * Waiting for rport create response from firmware.
4343  */
4344 static void
4345 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4346 {
4347 	bfa_trc(rp->bfa, rp->rport_tag);
4348 	bfa_trc(rp->bfa, event);
4349 
4350 	switch (event) {
4351 	case BFA_RPORT_SM_FWRSP:
4352 		bfa_stats(rp, sm_fwc_rsp);
4353 		bfa_sm_set_state(rp, bfa_rport_sm_online);
4354 		bfa_rport_online_cb(rp);
4355 		break;
4356 
4357 	case BFA_RPORT_SM_DELETE:
4358 		bfa_stats(rp, sm_fwc_del);
4359 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4360 		break;
4361 
4362 	case BFA_RPORT_SM_OFFLINE:
4363 		bfa_stats(rp, sm_fwc_off);
4364 		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4365 		break;
4366 
4367 	case BFA_RPORT_SM_HWFAIL:
4368 		bfa_stats(rp, sm_fwc_hwf);
4369 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4370 		break;
4371 
4372 	default:
4373 		bfa_stats(rp, sm_fwc_unexp);
4374 		bfa_sm_fault(rp->bfa, event);
4375 	}
4376 }
4377 
4378 /*
4379  * Request queue is full, awaiting queue resume to send create request.
4380  */
4381 static void
4382 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4383 {
4384 	bfa_trc(rp->bfa, rp->rport_tag);
4385 	bfa_trc(rp->bfa, event);
4386 
4387 	switch (event) {
4388 	case BFA_RPORT_SM_QRESUME:
4389 		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4390 		bfa_rport_send_fwcreate(rp);
4391 		break;
4392 
4393 	case BFA_RPORT_SM_DELETE:
4394 		bfa_stats(rp, sm_fwc_del);
4395 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4396 		bfa_reqq_wcancel(&rp->reqq_wait);
4397 		bfa_rport_free(rp);
4398 		break;
4399 
4400 	case BFA_RPORT_SM_OFFLINE:
4401 		bfa_stats(rp, sm_fwc_off);
4402 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4403 		bfa_reqq_wcancel(&rp->reqq_wait);
4404 		bfa_rport_offline_cb(rp);
4405 		break;
4406 
4407 	case BFA_RPORT_SM_HWFAIL:
4408 		bfa_stats(rp, sm_fwc_hwf);
4409 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4410 		bfa_reqq_wcancel(&rp->reqq_wait);
4411 		break;
4412 
4413 	default:
4414 		bfa_stats(rp, sm_fwc_unexp);
4415 		bfa_sm_fault(rp->bfa, event);
4416 	}
4417 }
4418 
4419 /*
4420  * Online state - normal parking state.
4421  */
4422 static void
4423 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4424 {
4425 	struct bfi_rport_qos_scn_s *qos_scn;
4426 
4427 	bfa_trc(rp->bfa, rp->rport_tag);
4428 	bfa_trc(rp->bfa, event);
4429 
4430 	switch (event) {
4431 	case BFA_RPORT_SM_OFFLINE:
4432 		bfa_stats(rp, sm_on_off);
4433 		if (bfa_rport_send_fwdelete(rp))
4434 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4435 		else
4436 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4437 		break;
4438 
4439 	case BFA_RPORT_SM_DELETE:
4440 		bfa_stats(rp, sm_on_del);
4441 		if (bfa_rport_send_fwdelete(rp))
4442 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4443 		else
4444 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4445 		break;
4446 
4447 	case BFA_RPORT_SM_HWFAIL:
4448 		bfa_stats(rp, sm_on_hwf);
4449 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4450 		break;
4451 
4452 	case BFA_RPORT_SM_SET_SPEED:
4453 		bfa_rport_send_fwspeed(rp);
4454 		break;
4455 
4456 	case BFA_RPORT_SM_QOS_SCN:
4457 		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4458 		rp->qos_attr = qos_scn->new_qos_attr;
4459 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4460 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4461 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4462 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4463 
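		/* Convert flow IDs to host endianness before comparing old vs. new */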
4464 		qos_scn->old_qos_attr.qos_flow_id  =
4465 			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4466 		qos_scn->new_qos_attr.qos_flow_id  =
4467 			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4468 
4469 		if (qos_scn->old_qos_attr.qos_flow_id !=
4470 			qos_scn->new_qos_attr.qos_flow_id)
4471 			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4472 						    qos_scn->old_qos_attr,
4473 						    qos_scn->new_qos_attr);
4474 		if (qos_scn->old_qos_attr.qos_priority !=
4475 			qos_scn->new_qos_attr.qos_priority)
4476 			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4477 						  qos_scn->old_qos_attr,
4478 						  qos_scn->new_qos_attr);
4479 		break;
4480 
4481 	default:
4482 		bfa_stats(rp, sm_on_unexp);
4483 		bfa_sm_fault(rp->bfa, event);
4484 	}
4485 }
4486 
4487 /*
4488  * Firmware rport is being deleted - awaiting f/w response.
4489  */
4490 static void
4491 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4492 {
4493 	bfa_trc(rp->bfa, rp->rport_tag);
4494 	bfa_trc(rp->bfa, event);
4495 
4496 	switch (event) {
4497 	case BFA_RPORT_SM_FWRSP:
4498 		bfa_stats(rp, sm_fwd_rsp);
4499 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4500 		bfa_rport_offline_cb(rp);
4501 		break;
4502 
4503 	case BFA_RPORT_SM_DELETE:
4504 		bfa_stats(rp, sm_fwd_del);
4505 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4506 		break;
4507 
4508 	case BFA_RPORT_SM_HWFAIL:
4509 		bfa_stats(rp, sm_fwd_hwf);
4510 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4511 		bfa_rport_offline_cb(rp);
4512 		break;
4513 
4514 	default:
4515 		bfa_stats(rp, sm_fwd_unexp);
4516 		bfa_sm_fault(rp->bfa, event);
4517 	}
4518 }
4519 
4520 static void
4521 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4522 {
4523 	bfa_trc(rp->bfa, rp->rport_tag);
4524 	bfa_trc(rp->bfa, event);
4525 
4526 	switch (event) {
4527 	case BFA_RPORT_SM_QRESUME:
4528 		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4529 		bfa_rport_send_fwdelete(rp);
4530 		break;
4531 
4532 	case BFA_RPORT_SM_DELETE:
4533 		bfa_stats(rp, sm_fwd_del);
4534 		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4535 		break;
4536 
4537 	case BFA_RPORT_SM_HWFAIL:
4538 		bfa_stats(rp, sm_fwd_hwf);
4539 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4540 		bfa_reqq_wcancel(&rp->reqq_wait);
4541 		bfa_rport_offline_cb(rp);
4542 		break;
4543 
4544 	default:
4545 		bfa_stats(rp, sm_fwd_unexp);
4546 		bfa_sm_fault(rp->bfa, event);
4547 	}
4548 }
4549 
4550 /*
4551  * Offline state.
4552  */
4553 static void
4554 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4555 {
4556 	bfa_trc(rp->bfa, rp->rport_tag);
4557 	bfa_trc(rp->bfa, event);
4558 
4559 	switch (event) {
4560 	case BFA_RPORT_SM_DELETE:
4561 		bfa_stats(rp, sm_off_del);
4562 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4563 		bfa_rport_free(rp);
4564 		break;
4565 
4566 	case BFA_RPORT_SM_ONLINE:
4567 		bfa_stats(rp, sm_off_on);
4568 		if (bfa_rport_send_fwcreate(rp))
4569 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4570 		else
4571 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4572 		break;
4573 
4574 	case BFA_RPORT_SM_HWFAIL:
4575 		bfa_stats(rp, sm_off_hwf);
4576 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4577 		break;
4578 
4579 	case BFA_RPORT_SM_OFFLINE:
4580 		bfa_rport_offline_cb(rp);
4581 		break;
4582 
4583 	default:
4584 		bfa_stats(rp, sm_off_unexp);
4585 		bfa_sm_fault(rp->bfa, event);
4586 	}
4587 }
4588 
4589 /*
4590  * Rport is deleted, waiting for firmware response to delete.
4591  */
4592 static void
4593 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4594 {
4595 	bfa_trc(rp->bfa, rp->rport_tag);
4596 	bfa_trc(rp->bfa, event);
4597 
4598 	switch (event) {
4599 	case BFA_RPORT_SM_FWRSP:
4600 		bfa_stats(rp, sm_del_fwrsp);
4601 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4602 		bfa_rport_free(rp);
4603 		break;
4604 
4605 	case BFA_RPORT_SM_HWFAIL:
4606 		bfa_stats(rp, sm_del_hwf);
4607 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4608 		bfa_rport_free(rp);
4609 		break;
4610 
4611 	default:
4612 		bfa_sm_fault(rp->bfa, event);
4613 	}
4614 }
4615 
4616 static void
4617 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4618 {
4619 	bfa_trc(rp->bfa, rp->rport_tag);
4620 	bfa_trc(rp->bfa, event);
4621 
4622 	switch (event) {
4623 	case BFA_RPORT_SM_QRESUME:
4624 		bfa_stats(rp, sm_del_fwrsp);
4625 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4626 		bfa_rport_send_fwdelete(rp);
4627 		break;
4628 
4629 	case BFA_RPORT_SM_HWFAIL:
4630 		bfa_stats(rp, sm_del_hwf);
4631 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4632 		bfa_reqq_wcancel(&rp->reqq_wait);
4633 		bfa_rport_free(rp);
4634 		break;
4635 
4636 	default:
4637 		bfa_sm_fault(rp->bfa, event);
4638 	}
4639 }
4640 
4641 /*
4642  * Waiting for rport create response from firmware. A delete is pending.
4643  */
4644 static void
4645 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4646 				enum bfa_rport_event event)
4647 {
4648 	bfa_trc(rp->bfa, rp->rport_tag);
4649 	bfa_trc(rp->bfa, event);
4650 
4651 	switch (event) {
4652 	case BFA_RPORT_SM_FWRSP:
4653 		bfa_stats(rp, sm_delp_fwrsp);
4654 		if (bfa_rport_send_fwdelete(rp))
4655 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4656 		else
4657 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4658 		break;
4659 
4660 	case BFA_RPORT_SM_HWFAIL:
4661 		bfa_stats(rp, sm_delp_hwf);
4662 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4663 		bfa_rport_free(rp);
4664 		break;
4665 
4666 	default:
4667 		bfa_stats(rp, sm_delp_unexp);
4668 		bfa_sm_fault(rp->bfa, event);
4669 	}
4670 }
4671 
4672 /*
4673  * Waiting for rport create response from firmware. Rport offline is pending.
4674  */
4675 static void
4676 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4677 				 enum bfa_rport_event event)
4678 {
4679 	bfa_trc(rp->bfa, rp->rport_tag);
4680 	bfa_trc(rp->bfa, event);
4681 
4682 	switch (event) {
4683 	case BFA_RPORT_SM_FWRSP:
4684 		bfa_stats(rp, sm_offp_fwrsp);
4685 		if (bfa_rport_send_fwdelete(rp))
4686 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4687 		else
4688 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4689 		break;
4690 
4691 	case BFA_RPORT_SM_DELETE:
4692 		bfa_stats(rp, sm_offp_del);
4693 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4694 		break;
4695 
4696 	case BFA_RPORT_SM_HWFAIL:
4697 		bfa_stats(rp, sm_offp_hwf);
4698 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4699 		bfa_rport_offline_cb(rp);
4700 		break;
4701 
4702 	default:
4703 		bfa_stats(rp, sm_offp_unexp);
4704 		bfa_sm_fault(rp->bfa, event);
4705 	}
4706 }
4707 
4708 /*
4709  * IOC h/w failed.
4710  */
4711 static void
4712 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4713 {
4714 	bfa_trc(rp->bfa, rp->rport_tag);
4715 	bfa_trc(rp->bfa, event);
4716 
4717 	switch (event) {
4718 	case BFA_RPORT_SM_OFFLINE:
4719 		bfa_stats(rp, sm_iocd_off);
4720 		bfa_rport_offline_cb(rp);
4721 		break;
4722 
4723 	case BFA_RPORT_SM_DELETE:
4724 		bfa_stats(rp, sm_iocd_del);
4725 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4726 		bfa_rport_free(rp);
4727 		break;
4728 
4729 	case BFA_RPORT_SM_ONLINE:
4730 		bfa_stats(rp, sm_iocd_on);
4731 		if (bfa_rport_send_fwcreate(rp))
4732 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4733 		else
4734 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4735 		break;
4736 
4737 	case BFA_RPORT_SM_HWFAIL:
4738 		break;
4739 
4740 	default:
4741 		bfa_stats(rp, sm_iocd_unexp);
4742 		bfa_sm_fault(rp->bfa, event);
4743 	}
4744 }
4745 
4746 
4747 
4748 /*
4749  *  bfa_rport_private BFA rport private functions
4750  */
4751 
4752 static void
4753 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4754 {
4755 	struct bfa_rport_s *rp = cbarg;
4756 
4757 	if (complete)
4758 		bfa_cb_rport_online(rp->rport_drv);
4759 }
4760 
4761 static void
4762 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4763 {
4764 	struct bfa_rport_s *rp = cbarg;
4765 
4766 	if (complete)
4767 		bfa_cb_rport_offline(rp->rport_drv);
4768 }
4769 
4770 static void
4771 bfa_rport_qresume(void *cbarg)
4772 {
4773 	struct bfa_rport_s	*rp = cbarg;
4774 
4775 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4776 }
4777 
4778 void
4779 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4780 		struct bfa_s *bfa)
4781 {
4782 	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4783 
4784 	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4785 		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4786 
4787 	/* kva memory */
4788 	bfa_mem_kva_setup(minfo, rport_kva,
4789 		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4790 }
4791 
4792 void
4793 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4794 		struct bfa_pcidev_s *pcidev)
4795 {
4796 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4797 	struct bfa_rport_s *rp;
4798 	u16 i;
4799 
4800 	INIT_LIST_HEAD(&mod->rp_free_q);
4801 	INIT_LIST_HEAD(&mod->rp_active_q);
4802 	INIT_LIST_HEAD(&mod->rp_unused_q);
4803 
4804 	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
4805 	mod->rps_list = rp;
4806 	mod->num_rports = cfg->fwcfg.num_rports;
4807 
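	/* num_rports must be non-zero and a power of two */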
4808 	WARN_ON(!mod->num_rports ||
4809 		   (mod->num_rports & (mod->num_rports - 1)));
4810 
4811 	for (i = 0; i < mod->num_rports; i++, rp++) {
4812 		memset(rp, 0, sizeof(struct bfa_rport_s));
4813 		rp->bfa = bfa;
4814 		rp->rport_tag = i;
4815 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4816 
4817 		/*
4818 		 *  rport tag 0 is reserved and kept off the free queue
4819 		 */
4820 		if (i)
4821 			list_add_tail(&rp->qe, &mod->rp_free_q);
4822 
4823 		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4824 	}
4825 
4826 	/*
4827 	 * consume memory
4828 	 */
4829 	bfa_mem_kva_curp(mod) = (u8 *) rp;
4830 }
4831 
4832 void
4833 bfa_rport_iocdisable(struct bfa_s *bfa)
4834 {
4835 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4836 	struct bfa_rport_s *rport;
4837 	struct list_head *qe, *qen;
4838 
4839 	/* Enqueue unused rport resources to free_q */
4840 	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4841 
4842 	list_for_each_safe(qe, qen, &mod->rp_active_q) {
4843 		rport = (struct bfa_rport_s *) qe;
4844 		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4845 	}
4846 }
4847 
4848 static struct bfa_rport_s *
4849 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4850 {
4851 	struct bfa_rport_s *rport;
4852 
4853 	bfa_q_deq(&mod->rp_free_q, &rport);
4854 	if (rport)
4855 		list_add_tail(&rport->qe, &mod->rp_active_q);
4856 
4857 	return rport;
4858 }
4859 
4860 static void
4861 bfa_rport_free(struct bfa_rport_s *rport)
4862 {
4863 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4864 
4865 	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4866 	list_del(&rport->qe);
4867 	list_add_tail(&rport->qe, &mod->rp_free_q);
4868 }
4869 
4870 static bfa_boolean_t
4871 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4872 {
4873 	struct bfi_rport_create_req_s *m;
4874 
4875 	/*
4876 	 * check for room in queue to send request now
4877 	 */
4878 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4879 	if (!m) {
4880 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4881 		return BFA_FALSE;
4882 	}
4883 
4884 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4885 			bfa_fn_lpu(rp->bfa));
4886 	m->bfa_handle = rp->rport_tag;
4887 	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4888 	m->pid = rp->rport_info.pid;
4889 	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
4890 	m->local_pid = rp->rport_info.local_pid;
4891 	m->fc_class = rp->rport_info.fc_class;
4892 	m->vf_en = rp->rport_info.vf_en;
4893 	m->vf_id = rp->rport_info.vf_id;
4894 	m->cisc = rp->rport_info.cisc;
4895 
4896 	/*
4897 	 * queue I/O message to firmware
4898 	 */
4899 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4900 	return BFA_TRUE;
4901 }
4902 
4903 static bfa_boolean_t
4904 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4905 {
4906 	struct bfi_rport_delete_req_s *m;
4907 
4908 	/*
4909 	 * check for room in queue to send request now
4910 	 */
4911 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4912 	if (!m) {
4913 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4914 		return BFA_FALSE;
4915 	}
4916 
4917 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4918 			bfa_fn_lpu(rp->bfa));
4919 	m->fw_handle = rp->fw_handle;
4920 
4921 	/*
4922 	 * queue I/O message to firmware
4923 	 */
4924 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4925 	return BFA_TRUE;
4926 }
4927 
4928 static bfa_boolean_t
4929 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4930 {
4931 	struct bfa_rport_speed_req_s *m;
4932 
4933 	/*
4934 	 * check for room in queue to send request now
4935 	 */
4936 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4937 	if (!m) {
4938 		bfa_trc(rp->bfa, rp->rport_info.speed);
4939 		return BFA_FALSE;
4940 	}
4941 
4942 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4943 			bfa_fn_lpu(rp->bfa));
4944 	m->fw_handle = rp->fw_handle;
4945 	m->speed = (u8)rp->rport_info.speed;
4946 
4947 	/*
4948 	 * queue I/O message to firmware
4949 	 */
4950 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4951 	return BFA_TRUE;
4952 }
4953 
4954 
4955 
4956 /*
4957  *  bfa_rport_public
4958  */
4959 
4960 /*
4961  * Rport interrupt processing.
4962  */
4963 void
4964 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4965 {
4966 	union bfi_rport_i2h_msg_u msg;
4967 	struct bfa_rport_s *rp;
4968 
4969 	bfa_trc(bfa, m->mhdr.msg_id);
4970 
4971 	msg.msg = m;
4972 
4973 	switch (m->mhdr.msg_id) {
4974 	case BFI_RPORT_I2H_CREATE_RSP:
4975 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4976 		rp->fw_handle = msg.create_rsp->fw_handle;
4977 		rp->qos_attr = msg.create_rsp->qos_attr;
4978 		bfa_rport_set_lunmask(bfa, rp);
4979 		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4980 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4981 		break;
4982 
4983 	case BFI_RPORT_I2H_DELETE_RSP:
4984 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4985 		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4986 		bfa_rport_unset_lunmask(bfa, rp);
4987 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4988 		break;
4989 
4990 	case BFI_RPORT_I2H_QOS_SCN:
4991 		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4992 		rp->event_arg.fw_msg = msg.qos_scn_evt;
4993 		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4994 		break;
4995 
4996 	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
4997 		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
4998 				&msg.lip_scn->loop_info);
4999 		bfa_cb_rport_scn_online(bfa);
5000 		break;
5001 
5002 	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
5003 		bfa_cb_rport_scn_offline(bfa);
5004 		break;
5005 
5006 	case BFI_RPORT_I2H_NO_DEV:
5007 		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
5008 		bfa_cb_rport_scn_no_dev(rp->rport_drv);
5009 		break;
5010 
5011 	default:
5012 		bfa_trc(bfa, m->mhdr.msg_id);
5013 		WARN_ON(1);
5014 	}
5015 }
5016 
5017 void
5018 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
5019 {
5020 	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
5021 	struct list_head	*qe;
5022 	int	i;
5023 
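	/* Park rports beyond the firmware-supported count on the unused queue */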
5024 	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
5025 		bfa_q_deq_tail(&mod->rp_free_q, &qe);
5026 		list_add_tail(qe, &mod->rp_unused_q);
5027 	}
5028 }
5029 
5030 /*
5031  *  bfa_rport_api
5032  */
5033 
5034 struct bfa_rport_s *
5035 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
5036 {
5037 	struct bfa_rport_s *rp;
5038 
5039 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
5040 
5041 	if (rp == NULL)
5042 		return NULL;
5043 
5044 	rp->bfa = bfa;
5045 	rp->rport_drv = rport_drv;
5046 	memset(&rp->stats, 0, sizeof(rp->stats));
5047 
5048 	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
5049 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
5050 
5051 	return rp;
5052 }
5053 
5054 void
5055 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
5056 {
5057 	WARN_ON(rport_info->max_frmsz == 0);
5058 
5059 	/*
5060 	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
5061 	 * responses. Default to minimum size.
5062 	 */
5063 	if (rport_info->max_frmsz == 0) {
5064 		bfa_trc(rport->bfa, rport->rport_tag);
5065 		rport_info->max_frmsz = FC_MIN_PDUSZ;
5066 	}
5067 
5068 	rport->rport_info = *rport_info;
5069 	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
5070 }
5071 
5072 void
5073 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5074 {
5075 	WARN_ON(speed == 0);
5076 	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5077 
5078 	if (rport) {
5079 		rport->rport_info.speed = speed;
5080 		bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5081 	}
5082 }
5083 
5084 /* Set Rport LUN Mask */
5085 void
5086 bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
5087 {
5088 	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
5089 	wwn_t	lp_wwn, rp_wwn;
5090 	u8 lp_tag = (u8)rp->rport_info.lp_tag;
5091 
5092 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
5093 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
5094 
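	/* Flag LUN masking on both the logical port and the rport */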
5095 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
5096 					rp->lun_mask = BFA_TRUE;
5097 	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
5098 }
5099 
5100 /* Unset Rport LUN mask */
5101 void
5102 bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
5103 {
5104 	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
5105 	wwn_t	lp_wwn, rp_wwn;
5106 
5107 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
5108 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
5109 
5110 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
5111 				rp->lun_mask = BFA_FALSE;
5112 	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
5113 			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
5114 }
5115 
5116 /*
5117  * SGPG related functions
5118  */
5119 
5120 /*
5121  * Compute and return memory needed by the SGPG module.
5122  */
5123 void
5124 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5125 		struct bfa_s *bfa)
5126 {
5127 	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
5128 	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
5129 	struct bfa_mem_dma_s *seg_ptr;
5130 	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
5131 	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
5132 
5133 	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
5134 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
5135 	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
5136 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
5137 
5138 	num_sgpg = cfg->drvcfg.num_sgpgs;
5139 
5140 	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
5141 	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
5142 
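	/*
	 * Spread the SGPG DMA requirement across segments; the final
	 * segment takes whatever remainder is left.
	 */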
5143 	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
5144 		if (num_sgpg >= per_seg_sgpg) {
5145 			num_sgpg -= per_seg_sgpg;
5146 			bfa_mem_dma_setup(minfo, seg_ptr,
5147 					per_seg_sgpg * sgpg_sz);
5148 		} else
5149 			bfa_mem_dma_setup(minfo, seg_ptr,
5150 					num_sgpg * sgpg_sz);
5151 	}
5152 
5153 	/* kva memory */
5154 	bfa_mem_kva_setup(minfo, sgpg_kva,
5155 		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
5156 }
5157 
5158 void
5159 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5160 		struct bfa_pcidev_s *pcidev)
5161 {
5162 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5163 	struct bfa_sgpg_s *hsgpg;
5164 	struct bfi_sgpg_s *sgpg;
5165 	u64 align_len;
5166 	struct bfa_mem_dma_s *seg_ptr;
5167 	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
5168 	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;
5169 
5170 	union {
5171 		u64 pa;
5172 		union bfi_addr_u addr;
5173 	} sgpg_pa, sgpg_pa_tmp;
5174 
5175 	INIT_LIST_HEAD(&mod->sgpg_q);
5176 	INIT_LIST_HEAD(&mod->sgpg_wait_q);
5177 
5178 	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
5179 
5180 	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
5181 
5182 	num_sgpg = cfg->drvcfg.num_sgpgs;
5183 	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
5184 
5185 	/* dma/kva mem claim */
5186 	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
5187 
5188 	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
5189 
5190 		if (!bfa_mem_dma_virt(seg_ptr))
5191 			break;
5192 
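		/* Align the first SGPG in this segment to an SGPG-size boundary */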
5193 		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
5194 					     bfa_mem_dma_phys(seg_ptr);
5195 
5196 		sgpg = (struct bfi_sgpg_s *)
5197 			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
5198 		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
5199 		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
5200 
5201 		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
5202 
5203 		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
5204 			memset(hsgpg, 0, sizeof(*hsgpg));
5205 			memset(sgpg, 0, sizeof(*sgpg));
5206 
5207 			hsgpg->sgpg = sgpg;
5208 			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
5209 			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
5210 			list_add_tail(&hsgpg->qe, &mod->sgpg_q);
5211 
5212 			sgpg++;
5213 			hsgpg++;
5214 			sgpg_pa.pa += sgpg_sz;
5215 		}
5216 	}
5217 
5218 	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
5219 }
5220 
5221 bfa_status_t
5222 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5223 {
5224 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5225 	struct bfa_sgpg_s *hsgpg;
5226 	int i;
5227 
5228 	if (mod->free_sgpgs < nsgpgs)
5229 		return BFA_STATUS_ENOMEM;
5230 
5231 	for (i = 0; i < nsgpgs; i++) {
5232 		bfa_q_deq(&mod->sgpg_q, &hsgpg);
5233 		WARN_ON(!hsgpg);
5234 		list_add_tail(&hsgpg->qe, sgpg_q);
5235 	}
5236 
5237 	mod->free_sgpgs -= nsgpgs;
5238 	return BFA_STATUS_OK;
5239 }
5240 
5241 void
5242 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5243 {
5244 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5245 	struct bfa_sgpg_wqe_s *wqe;
5246 
5247 	mod->free_sgpgs += nsgpg;
5248 	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
5249 
5250 	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
5251 
5252 	if (list_empty(&mod->sgpg_wait_q))
5253 		return;
5254 
5255 	/*
5256 	 * satisfy as many waiting requests as possible
5257 	 */
5258 	do {
5259 		wqe = bfa_q_first(&mod->sgpg_wait_q);
5260 		if (mod->free_sgpgs < wqe->nsgpg)
5261 			nsgpg = mod->free_sgpgs;
5262 		else
5263 			nsgpg = wqe->nsgpg;
5264 		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
5265 		wqe->nsgpg -= nsgpg;
5266 		if (wqe->nsgpg == 0) {
5267 			list_del(&wqe->qe);
5268 			wqe->cbfn(wqe->cbarg);
5269 		}
5270 	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
5271 }
5272 
5273 void
5274 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5275 {
5276 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5277 
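	/* A caller should wait only when there are not enough free SGPGs */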
5278 	WARN_ON(nsgpg <= 0);
5279 	WARN_ON(nsgpg <= mod->free_sgpgs);
5280 
5281 	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5282 
5283 	/*
5284 	 * hand any remaining free SGPGs to this request first
5285 	 */
5286 	if (mod->free_sgpgs) {
5287 		/*
5288 		 * no one else is waiting for SGPG
5289 		 */
5290 		WARN_ON(!list_empty(&mod->sgpg_wait_q));
5291 		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
5292 		wqe->nsgpg -= mod->free_sgpgs;
5293 		mod->free_sgpgs = 0;
5294 	}
5295 
5296 	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
5297 }
5298 
5299 void
5300 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5301 {
5302 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5303 
5304 	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5305 	list_del(&wqe->qe);
5306 
5307 	if (wqe->nsgpg_total != wqe->nsgpg)
5308 		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5309 				   wqe->nsgpg_total - wqe->nsgpg);
5310 }
5311 
5312 void
5313 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5314 		   void *cbarg)
5315 {
5316 	INIT_LIST_HEAD(&wqe->sgpg_q);
5317 	wqe->cbfn = cbfn;
5318 	wqe->cbarg = cbarg;
5319 }
5320 
5321 /*
5322  *  UF related functions
5323  */
5324 /*
5325  *****************************************************************************
5326  * Internal functions
5327  *****************************************************************************
5328  */
5329 static void
5330 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5331 {
5332 	struct bfa_uf_s   *uf = cbarg;
5333 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5334 
5335 	if (complete)
5336 		ufm->ufrecv(ufm->cbarg, uf);
5337 }
5338 
5339 static void
5340 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5341 {
5342 	struct bfi_uf_buf_post_s *uf_bp_msg;
5343 	u16 i;
5344 	u16 buf_len;
5345 
5346 	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5347 	uf_bp_msg = ufm->uf_buf_posts;
5348 
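	/*
	 * Pre-build one buffer post message per UF so that posting at
	 * runtime is a simple copy into the request queue.
	 */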
5349 	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5350 	     i++, uf_bp_msg++) {
5351 		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5352 
5353 		uf_bp_msg->buf_tag = i;
5354 		buf_len = sizeof(struct bfa_uf_buf_s);
5355 		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5356 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5357 			    bfa_fn_lpu(ufm->bfa));
5358 		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5359 	}
5360 
5361 	/*
5362 	 * advance pointer beyond consumed memory
5363 	 */
5364 	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5365 }
5366 
5367 static void
5368 claim_ufs(struct bfa_uf_mod_s *ufm)
5369 {
5370 	u16 i;
5371 	struct bfa_uf_s   *uf;
5372 
5373 	/*
5374 	 * Claim block of memory for UF list
5375 	 */
5376 	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
5377 
5378 	/*
5379 	 * Initialize UFs and queue it in UF free queue
5380 	 * Initialize UFs and queue them in the UF free queue
5381 	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5382 		memset(uf, 0, sizeof(struct bfa_uf_s));
5383 		uf->bfa = ufm->bfa;
5384 		uf->uf_tag = i;
5385 		uf->pb_len = BFA_PER_UF_DMA_SZ;
5386 		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5387 		uf->buf_pa = ufm_pbs_pa(ufm, i);
5388 		list_add_tail(&uf->qe, &ufm->uf_free_q);
5389 	}
5390 
5391 	/*
5392 	 * advance memory pointer
5393 	 */
5394 	bfa_mem_kva_curp(ufm) = (u8 *) uf;
5395 }
5396 
5397 static void
5398 uf_mem_claim(struct bfa_uf_mod_s *ufm)
5399 {
5400 	claim_ufs(ufm);
5401 	claim_uf_post_msgs(ufm);
5402 }
5403 
5404 void
5405 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5406 		struct bfa_s *bfa)
5407 {
5408 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5409 	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5410 	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
5411 	struct bfa_mem_dma_s *seg_ptr;
5412 	u16	nsegs, idx, per_seg_uf = 0;
5413 
5414 	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5415 	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5416 
5417 	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5418 		if (num_ufs >= per_seg_uf) {
5419 			num_ufs -= per_seg_uf;
5420 			bfa_mem_dma_setup(minfo, seg_ptr,
5421 				per_seg_uf * BFA_PER_UF_DMA_SZ);
5422 		} else
5423 			bfa_mem_dma_setup(minfo, seg_ptr,
5424 				num_ufs * BFA_PER_UF_DMA_SZ);
5425 	}
5426 
5427 	/* kva memory */
5428 	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5429 		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
5430 }
5431 
5432 void
5433 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5434 		struct bfa_pcidev_s *pcidev)
5435 {
5436 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5437 
5438 	ufm->bfa = bfa;
5439 	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5440 	INIT_LIST_HEAD(&ufm->uf_free_q);
5441 	INIT_LIST_HEAD(&ufm->uf_posted_q);
5442 	INIT_LIST_HEAD(&ufm->uf_unused_q);
5443 
5444 	uf_mem_claim(ufm);
5445 }
5446 
5447 static struct bfa_uf_s *
5448 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5449 {
5450 	struct bfa_uf_s   *uf;
5451 
5452 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
5453 	return uf;
5454 }
5455 
5456 static void
5457 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5458 {
5459 	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
5460 }
5461 
5462 static bfa_status_t
5463 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5464 {
5465 	struct bfi_uf_buf_post_s *uf_post_msg;
5466 
5467 	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5468 	if (!uf_post_msg)
5469 		return BFA_STATUS_FAILED;
5470 
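	/* Copy the message pre-built at attach time and queue it to firmware */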
5471 	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5472 		      sizeof(struct bfi_uf_buf_post_s));
5473 	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5474 
5475 	bfa_trc(ufm->bfa, uf->uf_tag);
5476 
5477 	list_add_tail(&uf->qe, &ufm->uf_posted_q);
5478 	return BFA_STATUS_OK;
5479 }
5480 
5481 static void
5482 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5483 {
5484 	struct bfa_uf_s   *uf;
5485 
5486 	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5487 		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5488 			break;
5489 	}
5490 }
5491 
5492 static void
5493 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5494 {
5495 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5496 	u16 uf_tag = m->buf_tag;
5497 	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5498 	struct bfa_uf_buf_s *uf_buf;
5499 	uint8_t *buf;
5500 
5501 	uf_buf = (struct bfa_uf_buf_s *)
5502 			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5503 	buf = &uf_buf->d[0];
5504 
5505 	m->frm_len = be16_to_cpu(m->frm_len);
5506 	m->xfr_len = be16_to_cpu(m->xfr_len);
5507 
5508 	list_del(&uf->qe);	/* dequeue from posted queue */
5509 
5510 	uf->data_ptr = buf;
5511 	uf->data_len = m->xfr_len;
5512 
5513 	WARN_ON(uf->data_len < sizeof(struct fchs_s));
5514 
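	/*
	 * Log only the FC header if that is all that arrived; otherwise
	 * include the first payload word as well.
	 */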
5515 	if (uf->data_len == sizeof(struct fchs_s)) {
5516 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5517 			       uf->data_len, (struct fchs_s *)buf);
5518 	} else {
5519 		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5520 		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5521 				      BFA_PL_EID_RX, uf->data_len,
5522 				      (struct fchs_s *)buf, pld_w0);
5523 	}
5524 
5525 	if (bfa->fcs)
5526 		__bfa_cb_uf_recv(uf, BFA_TRUE);
5527 	else
5528 		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5529 }
5530 
5531 void
5532 bfa_uf_iocdisable(struct bfa_s *bfa)
5533 {
5534 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5535 	struct bfa_uf_s *uf;
5536 	struct list_head *qe, *qen;
5537 
5538 	/* Enqueue unused uf resources to free_q */
5539 	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5540 
5541 	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5542 		uf = (struct bfa_uf_s *) qe;
5543 		list_del(&uf->qe);
5544 		bfa_uf_put(ufm, uf);
5545 	}
5546 }
5547 
5548 void
5549 bfa_uf_start(struct bfa_s *bfa)
5550 {
5551 	bfa_uf_post_all(BFA_UF_MOD(bfa));
5552 }
5553 
5554 /*
5555  * Register handler for all unsolicited receive frames.
5556  *
5557  * @param[in]	bfa		BFA instance
5558  * @param[in]	ufrecv	receive handler function
5559  * @param[in]	cbarg	receive handler arg
5560  */
5561 void
5562 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5563 {
5564 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5565 
5566 	ufm->ufrecv = ufrecv;
5567 	ufm->cbarg = cbarg;
5568 }
5569 
5570 /*
5571  *	Free an unsolicited frame back to BFA.
5572  *
5573  * @param[in]		uf		unsolicited frame to be freed
5574  *
5575  * @return None
5576  */
5577 void
5578 bfa_uf_free(struct bfa_uf_s *uf)
5579 {
5580 	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5581 	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5582 }
5583 
5584 
5585 
5586 /*
5587  *  uf_pub BFA uf module public functions
5588  */
5589 void
5590 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5591 {
5592 	bfa_trc(bfa, msg->mhdr.msg_id);
5593 
5594 	switch (msg->mhdr.msg_id) {
5595 	case BFI_UF_I2H_FRM_RCVD:
5596 		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5597 		break;
5598 
5599 	default:
5600 		bfa_trc(bfa, msg->mhdr.msg_id);
5601 		WARN_ON(1);
5602 	}
5603 }
5604 
5605 void
5606 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5607 {
5608 	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
5609 	struct list_head	*qe;
5610 	int	i;
5611 
5612 	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5613 		bfa_q_deq_tail(&mod->uf_free_q, &qe);
5614 		list_add_tail(qe, &mod->uf_unused_q);
5615 	}
5616 }
5617 
5618 /*
5619  *	Dport forward declarations
5620  */
5621 
5622 enum bfa_dport_test_state_e {
5623 	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
5624 	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
5625 	BFA_DPORT_ST_COMP	= 2,	/*!< test completed successfully */
5626 	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
5627 	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not started; dport is enabled */
5628 };
5629 
5630 static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5631 				  enum bfa_dport_sm_event event);
5632 static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5633 				  enum bfa_dport_sm_event event);
5634 static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5635 				  enum bfa_dport_sm_event event);
5636 static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5637 				 enum bfa_dport_sm_event event);
5638 static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5639 				 enum bfa_dport_sm_event event);
5640 static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5641 				   enum bfa_dport_sm_event event);
5642 static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
5643 					enum bfa_dport_sm_event event);
5644 static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
5645 				  enum bfa_dport_sm_event event);
5646 static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
5647 				   enum bfa_dport_sm_event event);
5648 static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
5649 				   enum bfa_dport_sm_event event);
5650 static void bfa_dport_qresume(void *cbarg);
5651 static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5652 				struct bfi_diag_dport_rsp_s *msg);
5653 static void bfa_dport_scn(struct bfa_dport_s *dport,
5654 				struct bfi_diag_dport_scn_s *msg);
5655 
5656 /*
5657  *	BFA fcdiag module
5658  */
5659 #define BFA_DIAG_QTEST_TOV	1000    /* msec */
5660 
5661 /*
5662  *	Update the port diag busy status from the loopback lock
5663  */
5664 static void
5665 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5666 {
5667 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5668 
5669 	if (fcdiag->lb.lock)
5670 		fcport->diag_busy = BFA_TRUE;
5671 	else
5672 		fcport->diag_busy = BFA_FALSE;
5673 }
5674 
5675 void
5676 bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5677 		struct bfa_pcidev_s *pcidev)
5678 {
5679 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5680 	struct bfa_dport_s  *dport = &fcdiag->dport;
5681 
5682 	fcdiag->bfa             = bfa;
5683 	fcdiag->trcmod  = bfa->trcmod;
5684 	/* The common DIAG attach bfa_diag_attach() will do all the memory claiming */
5685 	dport->bfa = bfa;
5686 	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
5687 	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
5688 	dport->cbfn = NULL;
5689 	dport->cbarg = NULL;
5690 	dport->test_state = BFA_DPORT_ST_DISABLED;
5691 	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
5692 }
5693 
5694 void
5695 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5696 {
5697 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5698 	struct bfa_dport_s *dport = &fcdiag->dport;
5699 
5700 	bfa_trc(fcdiag, fcdiag->lb.lock);
5701 	if (fcdiag->lb.lock) {
5702 		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5703 		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5704 		fcdiag->lb.lock = 0;
5705 		bfa_fcdiag_set_busy_status(fcdiag);
5706 	}
5707 
5708 	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
5709 }
5710 
5711 static void
5712 bfa_fcdiag_queuetest_timeout(void *cbarg)
5713 {
5714 	struct bfa_fcdiag_s       *fcdiag = cbarg;
5715 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5716 
5717 	bfa_trc(fcdiag, fcdiag->qtest.all);
5718 	bfa_trc(fcdiag, fcdiag->qtest.count);
5719 
5720 	fcdiag->qtest.timer_active = 0;
5721 
5722 	res->status = BFA_STATUS_ETIMER;
5723 	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5724 	if (fcdiag->qtest.all)
5725 		res->queue  = fcdiag->qtest.all;
5726 
5727 	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5728 	fcdiag->qtest.status = BFA_STATUS_ETIMER;
5729 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5730 	fcdiag->qtest.lock = 0;
5731 }
5732 
5733 static bfa_status_t
5734 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5735 {
5736 	u32	i;
5737 	struct bfi_diag_qtest_req_s *req;
5738 
5739 	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5740 	if (!req)
5741 		return BFA_STATUS_DEVBUSY;
5742 
5743 	/* build host command */
5744 	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5745 		bfa_fn_lpu(fcdiag->bfa));
5746 
5747 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5748 		req->data[i] = QTEST_PAT_DEFAULT;
5749 
5750 	bfa_trc(fcdiag, fcdiag->qtest.queue);
5751 	/* ring doorbell */
5752 	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5753 	return BFA_STATUS_OK;
5754 }
5755 
5756 static void
5757 bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
5758 			bfi_diag_qtest_rsp_t *rsp)
5759 {
5760 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5761 	bfa_status_t status = BFA_STATUS_OK;
5762 	int i;
5763 
5764 	/* Check timer, should still be active   */
5765 	if (!fcdiag->qtest.timer_active) {
5766 		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
5767 		return;
5768 	}
5769 
5770 	/* update count */
5771 	fcdiag->qtest.count--;
5772 
5773 	/* Check result: firmware echoes the complement of the test pattern */
5774 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
5775 		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
5776 			res->status = BFA_STATUS_DATACORRUPTED;
5777 			break;
5778 		}
5779 	}
5780 
5781 	if (res->status == BFA_STATUS_OK) {
5782 		if (fcdiag->qtest.count > 0) {
5783 			status = bfa_fcdiag_queuetest_send(fcdiag);
5784 			if (status == BFA_STATUS_OK)
5785 				return;
5786 			else
5787 				res->status = status;
5788 		} else if (fcdiag->qtest.all > 0 &&
5789 			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
5790 			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5791 			fcdiag->qtest.queue++;
5792 			status = bfa_fcdiag_queuetest_send(fcdiag);
5793 			if (status == BFA_STATUS_OK)
5794 				return;
5795 			else
5796 				res->status = status;
5797 		}
5798 	}
5799 
5800 	/* Stop the timer once all queues have completed */
5801 	if (fcdiag->qtest.timer_active) {
5802 		bfa_timer_stop(&fcdiag->qtest.timer);
5803 		fcdiag->qtest.timer_active = 0;
5804 	}
5805 	res->queue = fcdiag->qtest.queue;
5806 	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5807 	bfa_trc(fcdiag, res->count);
5808 	bfa_trc(fcdiag, res->status);
5809 	fcdiag->qtest.status = res->status;
5810 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5811 	fcdiag->qtest.lock = 0;
5812 }
5813 
5814 static void
5815 bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5816 			struct bfi_diag_lb_rsp_s *rsp)
5817 {
5818 	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5819 
5820 	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
5821 	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
5822 	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
5823 	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
5824 	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
5825 	res->status     = rsp->res.status;
5826 	fcdiag->lb.status = rsp->res.status;
5827 	bfa_trc(fcdiag, fcdiag->lb.status);
5828 	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5829 	fcdiag->lb.lock = 0;
5830 	bfa_fcdiag_set_busy_status(fcdiag);
5831 }
5832 
5833 static bfa_status_t
5834 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5835 			struct bfa_diag_loopback_s *loopback)
5836 {
5837 	struct bfi_diag_lb_req_s *lb_req;
5838 
5839 	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5840 	if (!lb_req)
5841 		return BFA_STATUS_DEVBUSY;
5842 
5843 	/* build host command */
5844 	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5845 		bfa_fn_lpu(fcdiag->bfa));
5846 
5847 	lb_req->lb_mode = loopback->lb_mode;
5848 	lb_req->speed = loopback->speed;
5849 	lb_req->loopcnt = loopback->loopcnt;
5850 	lb_req->pattern = loopback->pattern;
5851 
5852 	/* ring doorbell */
5853 	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5854 
5855 	bfa_trc(fcdiag, loopback->lb_mode);
5856 	bfa_trc(fcdiag, loopback->speed);
5857 	bfa_trc(fcdiag, loopback->loopcnt);
5858 	bfa_trc(fcdiag, loopback->pattern);
5859 	return BFA_STATUS_OK;
5860 }
5861 
5862 /*
5863  *	cpe/rme intr handler
5864  */
5865 void
5866 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5867 {
5868 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5869 
5870 	switch (msg->mhdr.msg_id) {
5871 	case BFI_DIAG_I2H_LOOPBACK:
5872 		bfa_fcdiag_loopback_comp(fcdiag,
5873 				(struct bfi_diag_lb_rsp_s *) msg);
5874 		break;
5875 	case BFI_DIAG_I2H_QTEST:
5876 		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5877 		break;
5878 	case BFI_DIAG_I2H_DPORT:
5879 		bfa_dport_req_comp(&fcdiag->dport,
5880 				(struct bfi_diag_dport_rsp_s *)msg);
5881 		break;
5882 	case BFI_DIAG_I2H_DPORT_SCN:
5883 		bfa_dport_scn(&fcdiag->dport,
5884 				(struct bfi_diag_dport_scn_s *)msg);
5885 		break;
5886 	default:
5887 		bfa_trc(fcdiag, msg->mhdr.msg_id);
5888 		WARN_ON(1);
5889 	}
5890 }
5891 
5892 /*
5893  *	Loopback test
5894  *
5895  *   @param[in] *bfa            - bfa data struct
5896  *   @param[in] opmode          - port operation mode
5897  *   @param[in] speed           - port speed
5898  *   @param[in] lpcnt           - loop count
5899  *   @param[in] pat             - pattern used to build the packet
5900  *   @param[in] *result         - pointer to bfa_diag_loopback_result_s struct
5901  *   @param[in] cbfn            - callback function
5902  *   @param[in] cbarg           - callback function arg
5903  *
5904  *   @return BFA_STATUS_OK if the test was started, error status otherwise
5905  */
5906 bfa_status_t
5907 bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5908 		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5909 		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5910 		void *cbarg)
5911 {
5912 	struct  bfa_diag_loopback_s loopback;
5913 	struct bfa_port_attr_s attr;
5914 	bfa_status_t status;
5915 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5916 
5917 	if (!bfa_iocfc_is_operational(bfa))
5918 		return BFA_STATUS_IOC_NON_OP;
5919 
5920 	/* if port is PBC disabled, return error */
5921 	if (bfa_fcport_is_pbcdisabled(bfa)) {
5922 		bfa_trc(fcdiag, BFA_STATUS_PBC);
5923 		return BFA_STATUS_PBC;
5924 	}
5925 
5926 	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5927 		bfa_trc(fcdiag, opmode);
5928 		return BFA_STATUS_PORT_NOT_DISABLED;
5929 	}
5930 
5931 	/*
5932 	 * Check if input speed is supported by the port mode
5933 	 */
5934 	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5935 		if (!(speed == BFA_PORT_SPEED_1GBPS ||
5936 		      speed == BFA_PORT_SPEED_2GBPS ||
5937 		      speed == BFA_PORT_SPEED_4GBPS ||
5938 		      speed == BFA_PORT_SPEED_8GBPS ||
5939 		      speed == BFA_PORT_SPEED_16GBPS ||
5940 		      speed == BFA_PORT_SPEED_AUTO)) {
5941 			bfa_trc(fcdiag, speed);
5942 			return BFA_STATUS_UNSUPP_SPEED;
5943 		}
5944 		bfa_fcport_get_attr(bfa, &attr);
5945 		bfa_trc(fcdiag, attr.speed_supported);
5946 		if (speed > attr.speed_supported)
5947 			return BFA_STATUS_UNSUPP_SPEED;
5948 	} else {
5949 		if (speed != BFA_PORT_SPEED_10GBPS) {
5950 			bfa_trc(fcdiag, speed);
5951 			return BFA_STATUS_UNSUPP_SPEED;
5952 		}
5953 	}
5954 
5955 	/*
5956 	 * For CT2, 1G is not supported
5957 	 */
5958 	if ((speed == BFA_PORT_SPEED_1GBPS) &&
5959 	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
5960 		bfa_trc(fcdiag, speed);
5961 		return BFA_STATUS_UNSUPP_SPEED;
5962 	}
5963 
5964 	/* For Mezz cards, the requested port speed must be validated */
5965 	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5966 		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5967 			if (!(speed == BFA_PORT_SPEED_1GBPS ||
5968 			      speed == BFA_PORT_SPEED_2GBPS ||
5969 			      speed == BFA_PORT_SPEED_4GBPS ||
5970 			      speed == BFA_PORT_SPEED_8GBPS ||
5971 			      speed == BFA_PORT_SPEED_16GBPS ||
5972 			      speed == BFA_PORT_SPEED_AUTO))
5973 				return BFA_STATUS_UNSUPP_SPEED;
5974 		} else {
5975 			if (speed != BFA_PORT_SPEED_10GBPS)
5976 				return BFA_STATUS_UNSUPP_SPEED;
5977 		}
5978 	}
5979 	/* check to see if fcport is dport */
5980 	if (bfa_fcport_is_dport(bfa)) {
5981 		bfa_trc(fcdiag, fcdiag->lb.lock);
5982 		return BFA_STATUS_DPORT_ENABLED;
5983 	}
5984 	/* check to see if there is another destructive diag cmd running */
5985 	if (fcdiag->lb.lock) {
5986 		bfa_trc(fcdiag, fcdiag->lb.lock);
5987 		return BFA_STATUS_DEVBUSY;
5988 	}
5989 
5990 	fcdiag->lb.lock = 1;
5991 	loopback.lb_mode = opmode;
5992 	loopback.speed = speed;
5993 	loopback.loopcnt = lpcnt;
5994 	loopback.pattern = pat;
5995 	fcdiag->lb.result = result;
5996 	fcdiag->lb.cbfn = cbfn;
5997 	fcdiag->lb.cbarg = cbarg;
5998 	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5999 	bfa_fcdiag_set_busy_status(fcdiag);
6000 
6001 	/* Send msg to fw */
6002 	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
6003 	return status;
6004 }
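
/*
 * Usage sketch (editorial illustration, not part of the original driver):
 * how a caller such as an ioctl/bsg handler might start an internal
 * loopback test. The context structure, callback, and parameter values
 * below are assumptions for illustration; real callers supply their own
 * synchronization and user-provided loop count/pattern, and must disable
 * the port first, as checked above.
 */
struct example_diag_ctx {
	bfa_status_t	status;
	int		done;
};

static void
example_diag_cb(void *cbarg, bfa_status_t status)
{
	struct example_diag_ctx *ctx = cbarg;

	ctx->status = status;
	ctx->done = 1;	/* a real caller would signal a completion here */
}

static bfa_status_t __maybe_unused
example_run_loopback(struct bfa_s *bfa,
			struct bfa_diag_loopback_result_s *result)
{
	static struct example_diag_ctx ctx;

	/*
	 * Internal loopback at auto-negotiated speed; the loop count is
	 * illustrative. ctx and result must remain valid until
	 * example_diag_cb() has run.
	 */
	return bfa_fcdiag_loopback(bfa, BFA_PORT_OPMODE_LB_INT,
			BFA_PORT_SPEED_AUTO, 1024, LB_PATTERN_DEFAULT,
			result, example_diag_cb, &ctx);
}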
6005 
6006 /*
6007  *	DIAG queue test command
6008  *
6009  *   @param[in] *bfa            - bfa data struct
6010  *   @param[in] force           - 1: skip the IOC operational check
6011  *   @param[in] queue           - queue number to test
6012  *   @param[in] *result         - pointer to bfa_diag_qtest_result_s struct
6013  *   @param[in] cbfn            - callback function
6014  *   @param[in] *cbarg          - callback function arg
6015  *
6016  *   @return BFA_STATUS_OK if the test was started, error status otherwise
6017  */
6018 bfa_status_t
6019 bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
6020 		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
6021 		void *cbarg)
6022 {
6023 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6024 	bfa_status_t status;
6025 	bfa_trc(fcdiag, force);
6026 	bfa_trc(fcdiag, queue);
6027 
6028 	if (!force && !bfa_iocfc_is_operational(bfa))
6029 		return BFA_STATUS_IOC_NON_OP;
6030 
6031 	/* check to see if there is another destructive diag cmd running */
6032 	if (fcdiag->qtest.lock) {
6033 		bfa_trc(fcdiag, fcdiag->qtest.lock);
6034 		return BFA_STATUS_DEVBUSY;
6035 	}
6036 
6037 	/* Initialization */
6038 	fcdiag->qtest.lock = 1;
6039 	fcdiag->qtest.cbfn = cbfn;
6040 	fcdiag->qtest.cbarg = cbarg;
6041 	fcdiag->qtest.result = result;
6042 	fcdiag->qtest.count = QTEST_CNT_DEFAULT;
6043 
6044 	/* Init test results */
6045 	fcdiag->qtest.result->status = BFA_STATUS_OK;
6046 	fcdiag->qtest.result->count  = 0;
6047 
6048 	/* send */
6049 	if (queue < BFI_IOC_MAX_CQS) {
6050 		fcdiag->qtest.result->queue  = (u8)queue;
6051 		fcdiag->qtest.queue = (u8)queue;
6052 		fcdiag->qtest.all   = 0;
6053 	} else {
6054 		fcdiag->qtest.result->queue  = 0;
6055 		fcdiag->qtest.queue = 0;
6056 		fcdiag->qtest.all   = 1;
6057 	}
6058 	status = bfa_fcdiag_queuetest_send(fcdiag);
6059 
6060 	/* Start a timer */
6061 	if (status == BFA_STATUS_OK) {
6062 		bfa_timer_start(bfa, &fcdiag->qtest.timer,
6063 				bfa_fcdiag_queuetest_timeout, fcdiag,
6064 				BFA_DIAG_QTEST_TOV);
6065 		fcdiag->qtest.timer_active = 1;
6066 	}
6067 	return status;
6068 }
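
/*
 * Usage sketch (editorial illustration): exercising every request queue.
 * As implemented above, passing a queue number >= BFI_IOC_MAX_CQS selects
 * the "test all queues" mode. This reuses the hypothetical example_diag_cb/
 * example_diag_ctx helpers from the loopback sketch above.
 */
static bfa_status_t __maybe_unused
example_run_qtest(struct bfa_s *bfa, struct bfa_diag_qtest_result_s *result)
{
	static struct example_diag_ctx ctx;

	/* force = 0: require an operational IOC before testing */
	return bfa_fcdiag_queuetest(bfa, 0, BFI_IOC_MAX_CQS, result,
			example_diag_cb, &ctx);
}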
6069 
6070 /*
6071  * Check whether a diag loopback (PLB) test is running
6072  *
6073  *   @param[in] *bfa    - bfa data struct
6074  *
6075  *   @return BFA_STATUS_DIAG_BUSY if loopback is running, else BFA_STATUS_OK
6076  */
6077 bfa_status_t
6078 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6079 {
6080 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6081 	return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6082 }
6083 
6084 /*
6085  *	D-port
6086  */
6087 #define bfa_dport_result_start(__dport, __mode) do {				\
6088 		(__dport)->result.start_time = ktime_get_real_seconds();	\
6089 		(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
6090 		(__dport)->result.mode = (__mode);				\
6091 		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
6092 		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
6093 		(__dport)->result.lpcnt = (__dport)->lpcnt;			\
6094 } while (0)
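
/*
 * bfa_dport_result_start() stamps the common result header: it is invoked
 * when the firmware acknowledges a BFI_DPORT_ENABLE/BFI_DPORT_START request
 * (see bfa_dport_sm_enabling()/bfa_dport_sm_starting() below) and again on
 * a BFI_DPORT_SCN_TESTSTART notification in bfa_dport_scn().
 */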
6095 
6096 static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6097 					enum bfi_dport_req req);
6098 static void
6099 bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6100 {
6101 	if (dport->cbfn != NULL) {
6102 		dport->cbfn(dport->cbarg, bfa_status);
6103 		dport->cbfn = NULL;
6104 		dport->cbarg = NULL;
6105 	}
6106 }
6107 
6108 static void
6109 bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6110 {
6111 	bfa_trc(dport->bfa, event);
6112 
6113 	switch (event) {
6114 	case BFA_DPORT_SM_ENABLE:
6115 		bfa_fcport_dportenable(dport->bfa);
6116 		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
6117 			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6118 		else
6119 			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
6120 		break;
6121 
6122 	case BFA_DPORT_SM_DISABLE:
6123 		/* Already disabled */
6124 		break;
6125 
6126 	case BFA_DPORT_SM_HWFAIL:
6127 		/* ignore */
6128 		break;
6129 
6130 	case BFA_DPORT_SM_SCN:
6131 		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
6132 			bfa_fcport_ddportenable(dport->bfa);
6133 			dport->dynamic = BFA_TRUE;
6134 			dport->test_state = BFA_DPORT_ST_NOTSTART;
6135 			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6136 		} else {
6137 			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
6138 			WARN_ON(1);
6139 		}
6140 		break;
6141 
6142 	default:
6143 		bfa_sm_fault(dport->bfa, event);
6144 	}
6145 }
6146 
6147 static void
6148 bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6149 			    enum bfa_dport_sm_event event)
6150 {
6151 	bfa_trc(dport->bfa, event);
6152 
6153 	switch (event) {
6154 	case BFA_DPORT_SM_QRESUME:
6155 		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6156 		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6157 		break;
6158 
6159 	case BFA_DPORT_SM_HWFAIL:
6160 		bfa_reqq_wcancel(&dport->reqq_wait);
6161 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6162 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6163 		break;
6164 
6165 	default:
6166 		bfa_sm_fault(dport->bfa, event);
6167 	}
6168 }
6169 
6170 static void
6171 bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6172 {
6173 	bfa_trc(dport->bfa, event);
6174 
6175 	switch (event) {
6176 	case BFA_DPORT_SM_FWRSP:
6177 		memset(&dport->result, 0,
6178 				sizeof(struct bfa_diag_dport_result_s));
6179 		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
6180 			dport->test_state = BFA_DPORT_ST_NO_SFP;
6181 		} else {
6182 			dport->test_state = BFA_DPORT_ST_INP;
6183 			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
6184 		}
6185 		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6186 		break;
6187 
6188 	case BFA_DPORT_SM_REQFAIL:
6189 		dport->test_state = BFA_DPORT_ST_DISABLED;
6190 		bfa_fcport_dportdisable(dport->bfa);
6191 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6192 		break;
6193 
6194 	case BFA_DPORT_SM_HWFAIL:
6195 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6196 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6197 		break;
6198 
6199 	default:
6200 		bfa_sm_fault(dport->bfa, event);
6201 	}
6202 }
6203 
6204 static void
6205 bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6206 {
6207 	bfa_trc(dport->bfa, event);
6208 
6209 	switch (event) {
6210 	case BFA_DPORT_SM_START:
6211 		if (bfa_dport_send_req(dport, BFI_DPORT_START))
6212 			bfa_sm_set_state(dport, bfa_dport_sm_starting);
6213 		else
6214 			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
6215 		break;
6216 
6217 	case BFA_DPORT_SM_DISABLE:
6218 		bfa_fcport_dportdisable(dport->bfa);
6219 		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
6220 			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6221 		else
6222 			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
6223 		break;
6224 
6225 	case BFA_DPORT_SM_HWFAIL:
6226 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6227 		break;
6228 
6229 	case BFA_DPORT_SM_SCN:
6230 		switch (dport->i2hmsg.scn.state) {
6231 		case BFI_DPORT_SCN_TESTCOMP:
6232 			dport->test_state = BFA_DPORT_ST_COMP;
6233 			break;
6234 
6235 		case BFI_DPORT_SCN_TESTSTART:
6236 			dport->test_state = BFA_DPORT_ST_INP;
6237 			break;
6238 
6239 		case BFI_DPORT_SCN_TESTSKIP:
6240 		case BFI_DPORT_SCN_SUBTESTSTART:
6241 			/* no state change */
6242 			break;
6243 
6244 		case BFI_DPORT_SCN_SFP_REMOVED:
6245 			dport->test_state = BFA_DPORT_ST_NO_SFP;
6246 			break;
6247 
6248 		case BFI_DPORT_SCN_DDPORT_DISABLE:
6249 			bfa_fcport_ddportdisable(dport->bfa);
6250 
6251 			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
6252 				bfa_sm_set_state(dport,
6253 					 bfa_dport_sm_dynamic_disabling);
6254 			else
6255 				bfa_sm_set_state(dport,
6256 					 bfa_dport_sm_dynamic_disabling_qwait);
6257 			break;
6258 
6259 		case BFI_DPORT_SCN_FCPORT_DISABLE:
6260 			bfa_fcport_ddportdisable(dport->bfa);
6261 
6262 			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6263 			dport->dynamic = BFA_FALSE;
6264 			break;
6265 
6266 		default:
6267 			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
6268 			bfa_sm_fault(dport->bfa, event);
6269 		}
6270 		break;
6271 	default:
6272 		bfa_sm_fault(dport->bfa, event);
6273 	}
6274 }
6275 
6276 static void
6277 bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6278 			     enum bfa_dport_sm_event event)
6279 {
6280 	bfa_trc(dport->bfa, event);
6281 
6282 	switch (event) {
6283 	case BFA_DPORT_SM_QRESUME:
6284 		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6285 		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6286 		break;
6287 
6288 	case BFA_DPORT_SM_HWFAIL:
6289 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6290 		bfa_reqq_wcancel(&dport->reqq_wait);
6291 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6292 		break;
6293 
6294 	case BFA_DPORT_SM_SCN:
6295 		/* ignore */
6296 		break;
6297 
6298 	default:
6299 		bfa_sm_fault(dport->bfa, event);
6300 	}
6301 }
6302 
6303 static void
6304 bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6305 {
6306 	bfa_trc(dport->bfa, event);
6307 
6308 	switch (event) {
6309 	case BFA_DPORT_SM_FWRSP:
6310 		dport->test_state = BFA_DPORT_ST_DISABLED;
6311 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6312 		break;
6313 
6314 	case BFA_DPORT_SM_HWFAIL:
6315 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6316 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6317 		break;
6318 
6319 	case BFA_DPORT_SM_SCN:
6320 		/* no state change */
6321 		break;
6322 
6323 	default:
6324 		bfa_sm_fault(dport->bfa, event);
6325 	}
6326 }
6327 
6328 static void
6329 bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
6330 			    enum bfa_dport_sm_event event)
6331 {
6332 	bfa_trc(dport->bfa, event);
6333 
6334 	switch (event) {
6335 	case BFA_DPORT_SM_QRESUME:
6336 		bfa_sm_set_state(dport, bfa_dport_sm_starting);
6337 		bfa_dport_send_req(dport, BFI_DPORT_START);
6338 		break;
6339 
6340 	case BFA_DPORT_SM_HWFAIL:
6341 		bfa_reqq_wcancel(&dport->reqq_wait);
6342 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6343 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6344 		break;
6345 
6346 	default:
6347 		bfa_sm_fault(dport->bfa, event);
6348 	}
6349 }
6350 
6351 static void
6352 bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6353 {
6354 	bfa_trc(dport->bfa, event);
6355 
6356 	switch (event) {
6357 	case BFA_DPORT_SM_FWRSP:
6358 		memset(&dport->result, 0,
6359 				sizeof(struct bfa_diag_dport_result_s));
6360 		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
6361 			dport->test_state = BFA_DPORT_ST_NO_SFP;
6362 		} else {
6363 			dport->test_state = BFA_DPORT_ST_INP;
6364 			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
6365 		}
6366 		fallthrough;
6367 
6368 	case BFA_DPORT_SM_REQFAIL:
6369 		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6370 		break;
6371 
6372 	case BFA_DPORT_SM_HWFAIL:
6373 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6374 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6375 		break;
6376 
6377 	default:
6378 		bfa_sm_fault(dport->bfa, event);
6379 	}
6380 }
6381 
6382 static void
6383 bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
6384 			       enum bfa_dport_sm_event event)
6385 {
6386 	bfa_trc(dport->bfa, event);
6387 
6388 	switch (event) {
6389 	case BFA_DPORT_SM_SCN:
6390 		switch (dport->i2hmsg.scn.state) {
6391 		case BFI_DPORT_SCN_DDPORT_DISABLED:
6392 			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6393 			dport->dynamic = BFA_FALSE;
6394 			bfa_fcport_enable(dport->bfa);
6395 			break;
6396 
6397 		default:
6398 			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
6399 			bfa_sm_fault(dport->bfa, event);
6400 
6401 		}
6402 		break;
6403 
6404 	case BFA_DPORT_SM_HWFAIL:
6405 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6406 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6407 		break;
6408 
6409 	default:
6410 		bfa_sm_fault(dport->bfa, event);
6411 	}
6412 }
6413 
6414 static void
6415 bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
6416 			    enum bfa_dport_sm_event event)
6417 {
6418 	bfa_trc(dport->bfa, event);
6419 
6420 	switch (event) {
6421 	case BFA_DPORT_SM_QRESUME:
6422 		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
6423 		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
6424 		break;
6425 
6426 	case BFA_DPORT_SM_HWFAIL:
6427 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6428 		bfa_reqq_wcancel(&dport->reqq_wait);
6429 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6430 		break;
6431 
6432 	case BFA_DPORT_SM_SCN:
6433 		/* ignore */
6434 		break;
6435 
6436 	default:
6437 		bfa_sm_fault(dport->bfa, event);
6438 	}
6439 }
6440 
6441 static bfa_boolean_t
6442 bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6443 {
6444 	struct bfi_diag_dport_req_s *m;
6445 
6446 	/*
6447 	 * check for room in queue to send request now
6448 	 */
6449 	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6450 	if (!m) {
6451 		bfa_reqq_wait(dport->bfa, BFA_REQQ_DIAG, &dport->reqq_wait);
6452 		return BFA_FALSE;
6453 	}
6454 
6455 	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6456 		    bfa_fn_lpu(dport->bfa));
6457 	m->req  = req;
6458 	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6459 		m->lpcnt = cpu_to_be32(dport->lpcnt);
6460 		m->payload = cpu_to_be32(dport->payload);
6461 	}
6462 
6463 	/*
6464 	 * queue I/O message to firmware
6465 	 */
6466 	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6467 
6468 	return BFA_TRUE;
6469 }
6470 
6471 static void
6472 bfa_dport_qresume(void *cbarg)
6473 {
6474 	struct bfa_dport_s *dport = cbarg;
6475 
6476 	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6477 }
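
/*
 * Note on the queue-full flow: when bfa_dport_send_req() above finds no
 * room on the DIAG request queue, it parks dport->reqq_wait and the state
 * machine idles in one of the *_qwait states. Once queue space frees up,
 * the wait element's callback (bfa_dport_qresume, above) posts
 * BFA_DPORT_SM_QRESUME, which re-issues the request and transitions to the
 * corresponding active state.
 */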
6478 
6479 static void
6480 bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
6481 {
6482 	msg->status = be32_to_cpu(msg->status);
6483 	dport->i2hmsg.rsp.status = msg->status;
6484 	dport->rp_pwwn = msg->pwwn;
6485 	dport->rp_nwwn = msg->nwwn;
6486 
6487 	if ((msg->status == BFA_STATUS_OK) ||
6488 	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
6489 		bfa_trc(dport->bfa, msg->status);
6490 		bfa_trc(dport->bfa, dport->rp_pwwn);
6491 		bfa_trc(dport->bfa, dport->rp_nwwn);
6492 		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6493 
6494 	} else {
6495 		bfa_trc(dport->bfa, msg->status);
6496 		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
6497 	}
6498 	bfa_cb_fcdiag_dport(dport, msg->status);
6499 }
6500 
6501 static bfa_boolean_t
6502 bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6503 {
6504 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)	||
6505 	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6506 	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)	||
6507 	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6508 	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting)	||
6509 	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6510 		return BFA_TRUE;
6511 	} else {
6512 		return BFA_FALSE;
6513 	}
6514 }
6515 
6516 static void
6517 bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
6518 {
6519 	int i;
6520 	uint8_t subtesttype;
6521 
6522 	bfa_trc(dport->bfa, msg->state);
6523 	dport->i2hmsg.scn.state = msg->state;
6524 
6525 	switch (dport->i2hmsg.scn.state) {
6526 	case BFI_DPORT_SCN_TESTCOMP:
6527 		dport->result.end_time = ktime_get_real_seconds();
6528 		bfa_trc(dport->bfa, dport->result.end_time);
6529 
6530 		dport->result.status = msg->info.testcomp.status;
6531 		bfa_trc(dport->bfa, dport->result.status);
6532 
6533 		dport->result.roundtrip_latency =
6534 			be32_to_cpu(msg->info.testcomp.latency);
6535 		dport->result.est_cable_distance =
6536 			be32_to_cpu(msg->info.testcomp.distance);
6537 		dport->result.buffer_required =
6538 			be16_to_cpu(msg->info.testcomp.numbuffer);
6539 
6540 		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
6541 		dport->result.speed = msg->info.testcomp.speed;
6542 
6543 		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
6544 		bfa_trc(dport->bfa, dport->result.est_cable_distance);
6545 		bfa_trc(dport->bfa, dport->result.buffer_required);
6546 		bfa_trc(dport->bfa, dport->result.frmsz);
6547 		bfa_trc(dport->bfa, dport->result.speed);
6548 
6549 		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
6550 			dport->result.subtest[i].status =
6551 				msg->info.testcomp.subtest_status[i];
6552 			bfa_trc(dport->bfa, dport->result.subtest[i].status);
6553 		}
6554 		break;
6555 
6556 	case BFI_DPORT_SCN_TESTSKIP:
6557 	case BFI_DPORT_SCN_DDPORT_ENABLE:
6558 		memset(&dport->result, 0,
6559 				sizeof(struct bfa_diag_dport_result_s));
6560 		break;
6561 
6562 	case BFI_DPORT_SCN_TESTSTART:
6563 		memset(&dport->result, 0,
6564 				sizeof(struct bfa_diag_dport_result_s));
6565 		dport->rp_pwwn = msg->info.teststart.pwwn;
6566 		dport->rp_nwwn = msg->info.teststart.nwwn;
6567 		dport->lpcnt = be32_to_cpu(msg->info.teststart.numfrm);
6568 		bfa_dport_result_start(dport, msg->info.teststart.mode);
6569 		break;
6570 
6571 	case BFI_DPORT_SCN_SUBTESTSTART:
6572 		subtesttype = msg->info.teststart.type;
6573 		dport->result.subtest[subtesttype].start_time =
6574 			ktime_get_real_seconds();
6575 		dport->result.subtest[subtesttype].status =
6576 			DPORT_TEST_ST_INPRG;
6577 
6578 		bfa_trc(dport->bfa, subtesttype);
6579 		bfa_trc(dport->bfa,
6580 			dport->result.subtest[subtesttype].start_time);
6581 		break;
6582 
6583 	case BFI_DPORT_SCN_SFP_REMOVED:
6584 	case BFI_DPORT_SCN_DDPORT_DISABLED:
6585 	case BFI_DPORT_SCN_DDPORT_DISABLE:
6586 	case BFI_DPORT_SCN_FCPORT_DISABLE:
6587 		dport->result.status = DPORT_TEST_ST_IDLE;
6588 		break;
6589 
6590 	default:
6591 		bfa_sm_fault(dport->bfa, msg->state);
6592 	}
6593 
6594 	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
6595 }
6596 
6597 /*
6598  * Dport enable
6599  *
6600  * @param[in] *bfa            - bfa data struct
6601  */
6602 bfa_status_t
6603 bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6604 				bfa_cb_diag_t cbfn, void *cbarg)
6605 {
6606 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6607 	struct bfa_dport_s  *dport = &fcdiag->dport;
6608 
6609 	/*
6610 	 * Dport is not supported on MEZZ cards
6611 	 */
6612 	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6613 		bfa_trc(dport->bfa, BFA_STATUS_CMD_NOTSUPP_MEZZ);
6614 		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6615 	}
6616 
6617 	/*
6618 	 * Dport is supported only on CT2 and later ASICs
6619 	 */
6620 	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
6621 		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
6622 		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
6623 	}
6624 
6625 	/*
6626 	 * Check to see if IOC is down
6627 	 */
6628 	if (!bfa_iocfc_is_operational(bfa))
6629 		return BFA_STATUS_IOC_NON_OP;
6630 
6631 	/* if port is PBC disabled, return error */
6632 	if (bfa_fcport_is_pbcdisabled(bfa)) {
6633 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6634 		return BFA_STATUS_PBC;
6635 	}
6636 
6637 	/*
6638 	 * Check if port mode is FC port
6639 	 */
6640 	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6641 		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6642 		return BFA_STATUS_CMD_NOTSUPP_CNA;
6643 	}
6644 
6645 	/*
6646 	 * Check if port is in LOOP mode
6647 	 */
6648 	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6649 	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6650 		bfa_trc(dport->bfa, 0);
6651 		return BFA_STATUS_TOPOLOGY_LOOP;
6652 	}
6653 
6654 	/*
6655 	 * Check if port is TRUNK mode
6656 	 */
6657 	if (bfa_fcport_is_trunk_enabled(bfa)) {
6658 		bfa_trc(dport->bfa, 0);
6659 		return BFA_STATUS_ERROR_TRUNK_ENABLED;
6660 	}
6661 
6662 	/*
6663 	 * Check if diag loopback is running
6664 	 */
6665 	if (bfa_fcdiag_lb_is_running(bfa)) {
6666 		bfa_trc(dport->bfa, 0);
6667 		return BFA_STATUS_DIAG_BUSY;
6668 	}
6669 
6670 	/*
6671 	 * Check to see if the port is disabled or in dport state
6672 	 */
6673 	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6674 	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6675 		bfa_trc(dport->bfa, 0);
6676 		return BFA_STATUS_PORT_NOT_DISABLED;
6677 	}
6678 
6679 	/*
6680 	 * Check if dport is in dynamic mode
6681 	 */
6682 	if (dport->dynamic)
6683 		return BFA_STATUS_DDPORT_ERR;
6684 
6685 	/*
6686 	 * Check if dport is busy
6687 	 */
6688 	if (bfa_dport_is_sending_req(dport))
6689 		return BFA_STATUS_DEVBUSY;
6690 
6691 	/*
6692 	 * Check if dport is already enabled
6693 	 */
6694 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6695 		bfa_trc(dport->bfa, 0);
6696 		return BFA_STATUS_DPORT_ENABLED;
6697 	}
6698 
6699 	bfa_trc(dport->bfa, lpcnt);
6700 	bfa_trc(dport->bfa, pat);
6701 	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6702 	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6703 	dport->cbfn = cbfn;
6704 	dport->cbarg = cbarg;
6705 
6706 	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6707 	return BFA_STATUS_OK;
6708 }
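
/*
 * Usage sketch (editorial illustration): enabling D-port mode with the
 * driver defaults. Passing 0 for lpcnt/pat selects
 * DPORT_ENABLE_LOOPCNT_DEFAULT and LB_PATTERN_DEFAULT as shown above; the
 * callback/context are the hypothetical helpers from the loopback sketch.
 */
static bfa_status_t __maybe_unused
example_dport_enable(struct bfa_s *bfa)
{
	static struct example_diag_ctx ctx;

	return bfa_dport_enable(bfa, 0, 0, example_diag_cb, &ctx);
}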
6709 
6710 /*
6711  *	Dport disable
6712  *
6713  *	@param[in] *bfa            - bfa data struct
6714  */
6715 bfa_status_t
6716 bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6717 {
6718 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6719 	struct bfa_dport_s *dport = &fcdiag->dport;
6720 
6721 	if (bfa_ioc_is_disabled(&bfa->ioc))
6722 		return BFA_STATUS_IOC_DISABLED;
6723 
6724 	/* if port is PBC disabled, return error */
6725 	if (bfa_fcport_is_pbcdisabled(bfa)) {
6726 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6727 		return BFA_STATUS_PBC;
6728 	}
6729 
6730 	/*
6731 	 * Check if dport is in dynamic mode
6732 	 */
6733 	if (dport->dynamic)
6734 		return BFA_STATUS_DDPORT_ERR;
6736 
6737 	/*
6738 	 * Check to see if the port is disabled or in dport state
6739 	 */
6740 	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6741 	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6742 		bfa_trc(dport->bfa, 0);
6743 		return BFA_STATUS_PORT_NOT_DISABLED;
6744 	}
6745 
6746 	/*
6747 	 * Check if dport is busy
6748 	 */
6749 	if (bfa_dport_is_sending_req(dport))
6750 		return BFA_STATUS_DEVBUSY;
6751 
6752 	/*
6753 	 * Check if dport is already disabled
6754 	 */
6755 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6756 		bfa_trc(dport->bfa, 0);
6757 		return BFA_STATUS_DPORT_DISABLED;
6758 	}
6759 
6760 	dport->cbfn = cbfn;
6761 	dport->cbarg = cbarg;
6762 
6763 	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6764 	return BFA_STATUS_OK;
6765 }
6766 
6767 /*
6768  * Dport start -- restart dport test
6769  *
6770  *   @param[in] *bfa		- bfa data struct
6771  */
6772 bfa_status_t
6773 bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6774 			bfa_cb_diag_t cbfn, void *cbarg)
6775 {
6776 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6777 	struct bfa_dport_s *dport = &fcdiag->dport;
6778 
6779 	/*
6780 	 * Check to see if IOC is down
6781 	 */
6782 	if (!bfa_iocfc_is_operational(bfa))
6783 		return BFA_STATUS_IOC_NON_OP;
6784 
6785 	/*
6786 	 * Check if dport is in dynamic mode
6787 	 */
6788 	if (dport->dynamic)
6789 		return BFA_STATUS_DDPORT_ERR;
6790 
6791 	/*
6792 	 * Check if dport is busy
6793 	 */
6794 	if (bfa_dport_is_sending_req(dport))
6795 		return BFA_STATUS_DEVBUSY;
6796 
6797 	/*
6798 	 * Check if dport is in enabled state.
6799 	 * Test can only be restart when previous test has completed
6800 	 * The test can only be restarted after the previous test has completed
6801 	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6802 		bfa_trc(dport->bfa, 0);
6803 		return BFA_STATUS_DPORT_DISABLED;
6805 	} else {
6806 		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6807 			return BFA_STATUS_DPORT_INV_SFP;
6808 
6809 		if (dport->test_state == BFA_DPORT_ST_INP)
6810 			return BFA_STATUS_DEVBUSY;
6811 
6812 		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
6813 	}
6814 
6815 	bfa_trc(dport->bfa, lpcnt);
6816 	bfa_trc(dport->bfa, pat);
6817 
6818 	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6819 	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6820 
6821 	dport->cbfn = cbfn;
6822 	dport->cbarg = cbarg;
6823 
6824 	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
6825 	return BFA_STATUS_OK;
6826 }
6827 
6828 /*
6829  * Dport show -- return dport test result
6830  *
6831  *   @param[in] *bfa		- bfa data struct
6832  */
6833 bfa_status_t
6834 bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
6835 {
6836 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6837 	struct bfa_dport_s *dport = &fcdiag->dport;
6838 
6839 	/*
6840 	 * Check to see if IOC is down
6841 	 */
6842 	if (!bfa_iocfc_is_operational(bfa))
6843 		return BFA_STATUS_IOC_NON_OP;
6844 
6845 	/*
6846 	 * Check if dport is busy
6847 	 */
6848 	if (bfa_dport_is_sending_req(dport))
6849 		return BFA_STATUS_DEVBUSY;
6850 
6851 	/*
6852 	 * Check if dport is in enabled state.
6853 	 */
6854 	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6855 		bfa_trc(dport->bfa, 0);
6856 		return BFA_STATUS_DPORT_DISABLED;
6858 	}
6859 
6860 	/*
6861 	 * Check if there is SFP
6862 	 */
6863 	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6864 		return BFA_STATUS_DPORT_INV_SFP;
6865 
6866 	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
6867 
6868 	return BFA_STATUS_OK;
6869 }
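
/*
 * Usage sketch (editorial illustration): retrieving test results. Once the
 * BFI_DPORT_SCN_TESTCOMP notification has moved test_state to
 * BFA_DPORT_ST_COMP, a caller can copy the results out with
 * bfa_dport_show().
 */
static bfa_status_t __maybe_unused
example_dport_results(struct bfa_s *bfa,
			struct bfa_diag_dport_result_s *result)
{
	bfa_status_t status = bfa_dport_show(bfa, result);

	if (status == BFA_STATUS_OK)
		bfa_trc(BFA_FCDIAG_MOD(bfa), result->status);
	return status;
}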
6870