xref: /freebsd/sys/dev/ocs_fc/ocs_unsol.c (revision 1d386b48)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /**
33  * @file
34  * Code to handle unsolicited received FC frames.
35  */
36 
37 /*!
38  * @defgroup unsol Unsolicited Frame Handling
39  */
40 
41 #include "ocs.h"
42 #include "ocs_els.h"
43 #include "ocs_fabric.h"
44 #include "ocs_device.h"
45 
/*
 * Debug-log helper for a received frame: prefixes the message with the
 * frame's D_ID, formatted S_ID, R_CTL, OX_ID and RX_ID (the 24-bit IDs and
 * 16-bit exchange IDs are converted from their big-endian on-wire form).
 */
#define frame_printf(ocs, hdr, fmt, ...) \
	do { \
		char s_id_text[16]; \
		ocs_node_fcid_display(fc_be24toh((hdr)->s_id), s_id_text, sizeof(s_id_text)); \
		ocs_log_debug(ocs, "[%06x.%s] %02x/%04x/%04x: " fmt, fc_be24toh((hdr)->d_id), s_id_text, \
			(hdr)->r_ctl, ocs_be16toh((hdr)->ox_id), ocs_be16toh((hdr)->rx_id), ##__VA_ARGS__); \
	} while(0)
53 
54 static int32_t ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq);
55 static int32_t ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq);
56 static int32_t ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq);
57 static int32_t ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq);
58 static int32_t ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
59 static int32_t ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
60 static int32_t ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg);
61 static ocs_hw_sequence_t *ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock);
62 static uint8_t ocs_node_frames_held(void *arg);
63 static uint8_t ocs_domain_frames_held(void *arg);
64 static int32_t ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock);
65 static int32_t ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq);
66 
67 #define OCS_MAX_FRAMES_BEFORE_YEILDING 10000
68 
69 /**
70  * @brief Process the RQ circular buffer and process the incoming frames.
71  *
72  * @param mythread Pointer to thread object.
73  *
74  * @return Returns 0 on success, or a non-zero value on failure.
75  */
76 int32_t
77 ocs_unsol_rq_thread(ocs_thread_t *mythread)
78 {
79 	ocs_xport_rq_thread_info_t *thread_data = mythread->arg;
80 	ocs_t *ocs = thread_data->ocs;
81 	ocs_hw_sequence_t *seq;
82 	uint32_t yield_count = OCS_MAX_FRAMES_BEFORE_YEILDING;
83 
84 	ocs_log_debug(ocs, "%s running\n", mythread->name);
85 	while (!ocs_thread_terminate_requested(mythread)) {
86 		seq = ocs_cbuf_get(thread_data->seq_cbuf, 100000);
87 		if (seq == NULL) {
88 			/* Prevent soft lockups by yielding the CPU */
89 			ocs_thread_yield(&thread_data->thread);
90 			yield_count = OCS_MAX_FRAMES_BEFORE_YEILDING;
91 			continue;
92 		}
93 		/* Note: Always returns 0 */
94 		ocs_unsol_process((ocs_t*)seq->hw->os, seq);
95 
96 		/* We have to prevent CPU soft lockups, so just yield the CPU after x frames. */
97 		if (--yield_count == 0) {
98 			ocs_thread_yield(&thread_data->thread);
99 			yield_count = OCS_MAX_FRAMES_BEFORE_YEILDING;
100 		}
101 	}
102 	ocs_log_debug(ocs, "%s exiting\n", mythread->name);
103 	thread_data->thread_started = FALSE;
104 	return 0;
105 }
106 
107 /**
108  * @ingroup unsol
109  * @brief Callback function when aborting a port owned XRI
110  * exchanges.
111  *
112  * @return Returns 0.
113  */
114 static int32_t
115 ocs_unsol_abort_cb (ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status, uint32_t ext, void *arg)
116 {
117 	ocs_t *ocs = arg;
118 	ocs_assert(hio, -1);
119 	ocs_assert(arg, -1);
120 	ocs_log_debug(ocs, "xri=0x%x tag=0x%x\n", hio->indicator, hio->reqtag);
121 	ocs_hw_io_free(&ocs->hw, hio);
122 	return 0;
123 }
124 
125 /**
126  * @ingroup unsol
127  * @brief Abort either a RQ Pair auto XFER RDY XRI.
128  * @return Returns None.
129  */
130 static void
131 ocs_port_owned_abort(ocs_t *ocs, ocs_hw_io_t *hio)
132 {
133 	ocs_hw_rtn_e hw_rc;
134 	hw_rc = ocs_hw_io_abort(&ocs->hw, hio, FALSE,
135 				  ocs_unsol_abort_cb, ocs);
136 	if((hw_rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) ||
137 	   (hw_rc == OCS_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED)) {
138 		ocs_log_debug(ocs, "already aborted XRI 0x%x\n", hio->indicator);
139 	} else if(hw_rc != OCS_HW_RTN_SUCCESS) {
140 		ocs_log_debug(ocs, "Error aborting XRI 0x%x status %d\n",
141 			      hio->indicator, hw_rc);
142 	}
143 }
144 
145 /**
146  * @ingroup unsol
147  * @brief Handle unsolicited FC frames.
148  *
149  * <h3 class="desc">Description</h3>
150  * This function is called from the HW with unsolicited FC frames (FCP, ELS, BLS, etc.).
151  *
152  * @param arg Application-specified callback data.
153  * @param seq Header/payload sequence buffers.
154  *
155  * @return Returns 0 on success; or a negative error value on failure.
156  */
157 
158 int32_t
159 ocs_unsolicited_cb(void *arg, ocs_hw_sequence_t *seq)
160 {
161 	ocs_t *ocs = arg;
162 	ocs_xport_t *xport = ocs->xport;
163 	int32_t rc;
164 
165 	CPUTRACE("");
166 
167 	if (ocs->rq_threads == 0) {
168 		rc = ocs_unsol_process(ocs, seq);
169 	} else {
170 		/* use the ox_id to dispatch this IO to a thread */
171 		fc_header_t *hdr = seq->header->dma.virt;
172 		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
173 		uint32_t thr_index = ox_id % ocs->rq_threads;
174 
175 		rc = ocs_cbuf_put(xport->rq_thread_info[thr_index].seq_cbuf, seq);
176 	}
177 
178 	if (rc) {
179 		ocs_hw_sequence_free(&ocs->hw, seq);
180 	}
181 
182 	return 0;
183 }
184 
185 /**
186  * @ingroup unsol
187  * @brief Handle unsolicited FC frames.
188  *
189  * <h3 class="desc">Description</h3>
190  * This function is called either from ocs_unsolicited_cb() or ocs_unsol_rq_thread().
191  *
192  * @param ocs Pointer to the ocs structure.
193  * @param seq Header/payload sequence buffers.
194  *
195  * @return Returns 0 on success, or a negative error value on failure.
196  */
static int32_t
ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq)
{
	ocs_xport_fcfi_t *xport_fcfi = NULL;
	ocs_domain_t *domain;
	uint8_t seq_fcfi = seq->fcfi;

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (ocs->hw.workaround.override_fcfi) {
		if (ocs->hw.first_domain_idx > -1) {
			seq_fcfi = ocs->hw.first_domain_idx;
		}
	}

	/* Range check seq->fcfi */
	if (seq_fcfi < ARRAY_SIZE(ocs->xport->fcfi)) {
		xport_fcfi = &ocs->xport->fcfi[seq_fcfi];
	}

	/* If the transport FCFI entry is NULL, then drop the frame */
	if (xport_fcfi == NULL) {
		ocs_log_test(ocs, "FCFI %d is not valid, dropping frame\n", seq->fcfi);
		if (seq->hio != NULL) {
			/* frame is assisted by a port-owned XRI; abort it before freeing */
			ocs_port_owned_abort(ocs, seq->hio);
		}

		ocs_hw_sequence_free(&ocs->hw, seq);
		return 0;
	}
	domain = ocs_hw_domain_get(&ocs->hw, seq_fcfi);

	/*
	 * If we are holding frames or the domain is not yet registered or
	 * there's already frames on the pending list,
	 * then add the new frame to pending list
	 */
	if (domain == NULL ||
	    xport_fcfi->hold_frames ||
	    !ocs_list_empty(&xport_fcfi->pend_frames)) {
		ocs_lock(&xport_fcfi->pend_frames_lock);
			ocs_list_add_tail(&xport_fcfi->pend_frames, seq);
		ocs_unlock(&xport_fcfi->pend_frames_lock);

		if (domain != NULL) {
			/* immediately process pending frames */
			ocs_domain_process_pending(domain);
		}
	} else {
		/*
		 * We are not holding frames and pending list is empty, just process frame.
		 * A non-zero return means the frame was not handled - so cleanup
		 */
		if (ocs_domain_dispatch_frame(domain, seq)) {
			if (seq->hio != NULL) {
				/* undispatched frame still owns an XRI; abort before freeing */
				ocs_port_owned_abort(ocs, seq->hio);
			}
			ocs_hw_sequence_free(&ocs->hw, seq);
		}
	}
	return 0;
}
258 
259 /**
260  * @ingroup unsol
261  * @brief Process pending frames queued to the given node.
262  *
263  * <h3 class="desc">Description</h3>
264  * Frames that are queued for the \c node are dispatched and returned
265  * to the RQ.
266  *
267  * @param node Node of the queued frames that are to be dispatched.
268  *
269  * @return Returns 0 on success, or a negative error value on failure.
270  */
271 
int32_t
ocs_process_node_pending(ocs_node_t *node)
{
	ocs_t *ocs = node->ocs;
	ocs_hw_sequence_t *seq = NULL;
	uint32_t pend_frames_processed = 0;

	for (;;) {
		/* need to check for hold frames condition after each frame processed
		 * because any given frame could cause a transition to a state that
		 * holds frames
		 */
		if (ocs_node_frames_held(node)) {
			break;
		}

		/* Get next frame/sequence */
		ocs_lock(&node->pend_frames_lock);
			seq = ocs_list_remove_head(&node->pend_frames);
			if (seq == NULL) {
				/* list drained: snapshot and reset the counter under the lock */
				pend_frames_processed = node->pend_frames_processed;
				node->pend_frames_processed = 0;
				ocs_unlock(&node->pend_frames_lock);
				break;
			}
			node->pend_frames_processed++;
		ocs_unlock(&node->pend_frames_lock);

		/* now dispatch frame(s) to dispatch function */
		if (ocs_node_dispatch_frame(node, seq)) {
			/* non-zero return: frame not handled, clean up its resources */
			if (seq->hio != NULL) {
				ocs_port_owned_abort(ocs, seq->hio);
			}
			ocs_hw_sequence_free(&ocs->hw, seq);
		}
	}

	if (pend_frames_processed != 0) {
		ocs_log_debug(ocs, "%u node frames held and processed\n", pend_frames_processed);
	}

	return 0;
}
315 
316 /**
317  * @ingroup unsol
318  * @brief Process pending frames queued to the given domain.
319  *
320  * <h3 class="desc">Description</h3>
321  * Frames that are queued for the \c domain are dispatched and
322  * returned to the RQ.
323  *
324  * @param domain Domain of the queued frames that are to be
325  *		 dispatched.
326  *
327  * @return Returns 0 on success, or a negative error value on failure.
328  */
329 
int32_t
ocs_domain_process_pending(ocs_domain_t *domain)
{
	ocs_t *ocs = domain->ocs;
	ocs_xport_fcfi_t *xport_fcfi;
	ocs_hw_sequence_t *seq = NULL;
	uint32_t pend_frames_processed = 0;

	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];

	for (;;) {
		/* need to check for hold frames condition after each frame processed
		 * because any given frame could cause a transition to a state that
		 * holds frames
		 */
		if (ocs_domain_frames_held(domain)) {
			break;
		}

		/* Get next frame/sequence */
		ocs_lock(&xport_fcfi->pend_frames_lock);
			seq = ocs_list_remove_head(&xport_fcfi->pend_frames);
			if (seq == NULL) {
				/* list drained: snapshot and reset the counter under the lock */
				pend_frames_processed = xport_fcfi->pend_frames_processed;
				xport_fcfi->pend_frames_processed = 0;
				ocs_unlock(&xport_fcfi->pend_frames_lock);
				break;
			}
			xport_fcfi->pend_frames_processed++;
		ocs_unlock(&xport_fcfi->pend_frames_lock);

		/* now dispatch frame(s) to dispatch function */
		if (ocs_domain_dispatch_frame(domain, seq)) {
			/* non-zero return: frame not handled, clean up its resources */
			if (seq->hio != NULL) {
				ocs_port_owned_abort(ocs, seq->hio);
			}
			ocs_hw_sequence_free(&ocs->hw, seq);
		}
	}
	if (pend_frames_processed != 0) {
		ocs_log_debug(ocs, "%u domain frames held and processed\n", pend_frames_processed);
	}
	return 0;
}
375 
376 /**
377  * @ingroup unsol
378  * @brief Purge given pending list
379  *
380  * <h3 class="desc">Description</h3>
381  * Frames that are queued on the given pending list are
382  * discarded and returned to the RQ.
383  *
384  * @param ocs Pointer to ocs object.
385  * @param pend_list Pending list to be purged.
386  * @param list_lock Lock that protects pending list.
387  *
388  * @return Returns 0 on success, or a negative error value on failure.
389  */
390 
391 static int32_t
392 ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock)
393 {
394 	ocs_hw_sequence_t *frame;
395 
396 	for (;;) {
397 		frame = ocs_frame_next(pend_list, list_lock);
398 		if (frame == NULL) {
399 			break;
400 		}
401 
402 		frame_printf(ocs, (fc_header_t*) frame->header->dma.virt, "Discarding held frame\n");
403 		if (frame->hio != NULL) {
404 			ocs_port_owned_abort(ocs, frame->hio);
405 		}
406 		ocs_hw_sequence_free(&ocs->hw, frame);
407 	}
408 
409 	return 0;
410 }
411 
412 /**
413  * @ingroup unsol
414  * @brief Purge node's pending (queued) frames.
415  *
416  * <h3 class="desc">Description</h3>
417  * Frames that are queued for the \c node are discarded and returned
418  * to the RQ.
419  *
420  * @param node Node of the queued frames that are to be discarded.
421  *
422  * @return Returns 0 on success, or a negative error value on failure.
423  */
424 
425 int32_t
426 ocs_node_purge_pending(ocs_node_t *node)
427 {
428 	return ocs_purge_pending(node->ocs, &node->pend_frames, &node->pend_frames_lock);
429 }
430 
431 /**
432  * @ingroup unsol
433  * @brief Purge xport's pending (queued) frames.
434  *
435  * <h3 class="desc">Description</h3>
436  * Frames that are queued for the \c xport are discarded and
437  * returned to the RQ.
438  *
439  * @param domain Pointer to domain object.
440  *
441  * @return Returns 0 on success; or a negative error value on failure.
442  */
443 
444 int32_t
445 ocs_domain_purge_pending(ocs_domain_t *domain)
446 {
447 	ocs_t *ocs = domain->ocs;
448 	ocs_xport_fcfi_t *xport_fcfi;
449 
450 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
451 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
452 	return ocs_purge_pending(domain->ocs,
453 				 &xport_fcfi->pend_frames,
454 				 &xport_fcfi->pend_frames_lock);
455 }
456 
457 /**
458  * @ingroup unsol
459  * @brief Check if node's pending frames are held.
460  *
461  * @param arg Node for which the pending frame hold condition is
462  * checked.
463  *
464  * @return Returns 1 if node is holding pending frames, or 0
465  * if not.
466  */
467 
468 static uint8_t
469 ocs_node_frames_held(void *arg)
470 {
471 	ocs_node_t *node = (ocs_node_t *)arg;
472 	return node->hold_frames;
473 }
474 
475 /**
476  * @ingroup unsol
477  * @brief Check if domain's pending frames are held.
478  *
479  * @param arg Domain for which the pending frame hold condition is
480  * checked.
481  *
482  * @return Returns 1 if domain is holding pending frames, or 0
483  * if not.
484  */
485 
486 static uint8_t
487 ocs_domain_frames_held(void *arg)
488 {
489 	ocs_domain_t *domain = (ocs_domain_t *)arg;
490 	ocs_t *ocs = domain->ocs;
491 	ocs_xport_fcfi_t *xport_fcfi;
492 
493 	ocs_assert(domain != NULL, 1);
494 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, 1);
495 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
496 	return xport_fcfi->hold_frames;
497 }
498 
499 /**
500  * @ingroup unsol
501  * @brief Globally (at xport level) hold unsolicited frames.
502  *
503  * <h3 class="desc">Description</h3>
504  * This function places a hold on processing unsolicited FC
505  * frames queued to the xport pending list.
506  *
507  * @param domain Pointer to domain object.
508  *
509  * @return Returns None.
510  */
511 
512 void
513 ocs_domain_hold_frames(ocs_domain_t *domain)
514 {
515 	ocs_t *ocs = domain->ocs;
516 	ocs_xport_fcfi_t *xport_fcfi;
517 
518 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
519 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
520 	if (!xport_fcfi->hold_frames) {
521 		ocs_log_debug(domain->ocs, "hold frames set for FCFI %d\n",
522 			      domain->fcf_indicator);
523 		xport_fcfi->hold_frames = 1;
524 	}
525 }
526 
527 /**
528  * @ingroup unsol
529  * @brief Clear hold on unsolicited frames.
530  *
531  * <h3 class="desc">Description</h3>
532  * This function clears the hold on processing unsolicited FC
533  * frames queued to the domain pending list.
534  *
535  * @param domain Pointer to domain object.
536  *
537  * @return Returns None.
538  */
539 
540 void
541 ocs_domain_accept_frames(ocs_domain_t *domain)
542 {
543 	ocs_t *ocs = domain->ocs;
544 	ocs_xport_fcfi_t *xport_fcfi;
545 
546 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
547 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
548 	if (xport_fcfi->hold_frames == 1) {
549 		ocs_log_debug(domain->ocs, "hold frames cleared for FCFI %d\n",
550 			      domain->fcf_indicator);
551 	}
552 	xport_fcfi->hold_frames = 0;
553 	ocs_domain_process_pending(domain);
554 }
555 
556 /**
557  * @ingroup unsol
558  * @brief Dispatch unsolicited FC frame.
559  *
560  * <h3 class="desc">Description</h3>
561  * This function processes an unsolicited FC frame queued at the
562  * domain level.
563  *
564  * @param arg Pointer to ocs object.
565  * @param seq Header/payload sequence buffers.
566  *
567  * @return Returns 0 if frame processed and RX buffers cleaned
568  * up appropriately, -1 if frame not handled.
569  */
570 
static __inline int32_t
ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
{
	ocs_domain_t *domain = (ocs_domain_t *)arg;
	ocs_t *ocs = domain->ocs;
	fc_header_t *hdr;
	uint32_t s_id;
	uint32_t d_id;
	ocs_node_t *node = NULL;
	ocs_sport_t *sport = NULL;

	ocs_assert(seq->header, -1);
	ocs_assert(seq->header->dma.virt, -1);
	ocs_assert(seq->payload->dma.virt, -1);

	hdr = seq->header->dma.virt;

	/* extract the s_id and d_id */
	s_id = fc_be24toh(hdr->s_id);
	d_id = fc_be24toh(hdr->d_id);

	sport = domain->sport;
	if (sport == NULL) {
		frame_printf(ocs, hdr, "phy sport for FC ID 0x%06x is NULL, dropping frame\n", d_id);
		return -1;
	}

	if (sport->fc_id != d_id) {
		/* Not a physical port IO lookup sport associated with the npiv port */
		sport = ocs_sport_find(domain, d_id); /* Look up without lock */
		if (sport == NULL) {
			if (hdr->type == FC_TYPE_FCP) {
				/* Drop frame */
				ocs_log_warn(ocs, "unsolicited FCP frame with invalid d_id x%x, dropping\n",
					     d_id);
				return -1;
			} else {
				/* p2p will use this case */
				sport = domain->sport;
			}
		}
	}

	/* Lookup the node given the remote s_id */
	node = ocs_node_find(sport, s_id);

	/* If not found, then create a new node */
	if (node == NULL) {
		/* If this is solicited data or control based on R_CTL and there is no node context,
		 * then we can drop the frame
		 */
		if ((hdr->r_ctl == FC_RCTL_FC4_DATA) && (
		    (hdr->info == FC_RCTL_INFO_SOL_DATA) || (hdr->info == FC_RCTL_INFO_SOL_CTRL))) {
			ocs_log_debug(ocs, "solicited data/ctrl frame without node, dropping\n");
			return -1;
		}
		/* unsolicited frame from an unknown initiator: create a node for it */
		node = ocs_node_alloc(sport, s_id, FALSE, FALSE);
		if (node == NULL) {
			ocs_log_err(ocs, "ocs_node_alloc() failed\n");
			return -1;
		}
		/* don't send PLOGI on ocs_d_init entry */
		ocs_node_init_device(node, FALSE);
	}

	if (node->hold_frames || !ocs_list_empty((&node->pend_frames))) {
		/* TODO: info log level
		frame_printf(ocs, hdr, "Holding frame\n");
		*/
		/* add frame to node's pending list */
		ocs_lock(&node->pend_frames_lock);
			ocs_list_add_tail(&node->pend_frames, seq);
		ocs_unlock(&node->pend_frames_lock);

		/* queued frames are owned by the pend list now; report success */
		return 0;
	}

	/* now dispatch frame to the node frame handler */
	return ocs_node_dispatch_frame(node, seq);
}
651 
652 /**
653  * @ingroup unsol
654  * @brief Dispatch a frame.
655  *
656  * <h3 class="desc">Description</h3>
657  * A frame is dispatched from the \c node to the handler.
658  *
659  * @param arg Node that originated the frame.
660  * @param seq Header/payload sequence buffers.
661  *
662  * @return Returns 0 if frame processed and RX buffers cleaned
663  * up appropriately, -1 if frame not handled.
664  */
static int32_t
ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
{

	fc_header_t *hdr = seq->header->dma.virt;
	uint32_t port_id;
	ocs_node_t *node = (ocs_node_t *)arg;
	int32_t rc = -1;	/* default: frame not handled, caller cleans up */
	int32_t sit_set = 0;

	/* frames reaching a node must come from that node's remote FC ID */
	port_id = fc_be24toh(hdr->s_id);
	ocs_assert(port_id == node->rnode.fc_id, -1);

	/* only dispatch frames that end a sequence; otherwise drop below */
	if (fc_be24toh(hdr->f_ctl) & FC_FCTL_END_SEQUENCE) {
		/*if SIT is set */
		if (fc_be24toh(hdr->f_ctl) & FC_FCTL_SEQUENCE_INITIATIVE) {
			sit_set = 1;
		}
		/* route by R_CTL: ELS, BLS (ABTS), or FC-4 data (FCP/CT) */
		switch (hdr->r_ctl) {
		case FC_RCTL_ELS:
			if (sit_set) {
				rc = ocs_node_recv_els_frame(node, seq);
			}
			break;

		case FC_RCTL_BLS:
			if ((sit_set) && (hdr->info == FC_INFO_ABTS)) {
				rc = ocs_node_recv_abts_frame(node, seq);
			}else {
				rc = ocs_node_recv_bls_no_sit(node, seq);
			}
			break;

		case FC_RCTL_FC4_DATA:
			switch(hdr->type) {
			case FC_TYPE_FCP:
				if (hdr->info == FC_RCTL_INFO_UNSOL_CMD) {
					if (node->fcp_enabled) {
						if (sit_set) {
							rc = ocs_dispatch_fcp_cmd(node, seq);
						}else {
							/* send the auto xfer ready command */
							rc = ocs_dispatch_fcp_cmd_auto_xfer_rdy(node, seq);
						}
					} else {
						rc = ocs_node_recv_fcp_cmd(node, seq);
					}
				} else if (hdr->info == FC_RCTL_INFO_SOL_DATA) {
					if (sit_set) {
						rc = ocs_dispatch_fcp_data(node, seq);
					}
				}
				break;
			case FC_TYPE_GS:
				if (sit_set) {
					rc = ocs_node_recv_ct_frame(node, seq);
				}
				break;
			default:
				break;
			}
			break;
		}
	} else {
		/* END_SEQUENCE not set: dump the raw header words and drop */
		node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
			    ocs_htobe32(((uint32_t *)hdr)[0]),
			    ocs_htobe32(((uint32_t *)hdr)[1]),
			    ocs_htobe32(((uint32_t *)hdr)[2]),
			    ocs_htobe32(((uint32_t *)hdr)[3]),
			    ocs_htobe32(((uint32_t *)hdr)[4]),
			    ocs_htobe32(((uint32_t *)hdr)[5]));
	}
	return rc;
}
739 
740 /**
741  * @ingroup unsol
742  * @brief Dispatch unsolicited FCP frames (RQ Pair).
743  *
744  * <h3 class="desc">Description</h3>
745  * Dispatch unsolicited FCP frames (called from the device node state machine).
746  *
747  * @param io Pointer to the IO context.
748  * @param task_management_flags Task management flags from the FCP_CMND frame.
749  * @param node Node that originated the frame.
750  * @param lun 32-bit LUN from FCP_CMND frame.
751  *
752  * @return Returns None.
753  */
754 
755 static void
756 ocs_dispatch_unsolicited_tmf(ocs_io_t *io, uint8_t task_management_flags, ocs_node_t *node, uint64_t lun)
757 {
758 	uint32_t i;
759 	struct {
760 		uint32_t mask;
761 		ocs_scsi_tmf_cmd_e cmd;
762 	} tmflist[] = {
763 		{FCP_QUERY_TASK_SET,		OCS_SCSI_TMF_QUERY_TASK_SET},
764 		{FCP_ABORT_TASK_SET,		OCS_SCSI_TMF_ABORT_TASK_SET},
765 		{FCP_CLEAR_TASK_SET,		OCS_SCSI_TMF_CLEAR_TASK_SET},
766 		{FCP_QUERY_ASYNCHRONOUS_EVENT,	OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT},
767 		{FCP_LOGICAL_UNIT_RESET,	OCS_SCSI_TMF_LOGICAL_UNIT_RESET},
768 		{FCP_TARGET_RESET,		OCS_SCSI_TMF_TARGET_RESET},
769 		{FCP_CLEAR_ACA,			OCS_SCSI_TMF_CLEAR_ACA}};
770 
771 	io->exp_xfer_len = 0; /* BUG 32235 */
772 
773 	for (i = 0; i < ARRAY_SIZE(tmflist); i ++) {
774 		if (tmflist[i].mask & task_management_flags) {
775 			io->tmf_cmd = tmflist[i].cmd;
776 			ocs_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
777 			break;
778 		}
779 	}
780 	if (i == ARRAY_SIZE(tmflist)) {
781 		/* Not handled */
782 		node_printf(node, "TMF x%x rejected\n", task_management_flags);
783 		ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_REJECTED, NULL, ocs_fc_tmf_rejected_cb, NULL);
784 	}
785 }
786 
/*
 * Sanity-check a received FCP_CMND payload length against the size implied
 * by the IU itself; returns 0 if acceptable, -1 to drop the frame.
 */
static int32_t
ocs_validate_fcp_cmd(ocs_t *ocs, ocs_hw_sequence_t *seq)
{
	size_t		exp_payload_len = 0;
	fcp_cmnd_iu_t *cmnd = seq->payload->dma.virt;
	/* NOTE(review): additional_fcp_cdb_length is added unscaled here, while
	 * the dispatch paths multiply it by sizeof(uint32_t) — presumably this
	 * is an intentionally lenient lower bound, but worth confirming. */
	exp_payload_len = sizeof(fcp_cmnd_iu_t) - 16 + cmnd->additional_fcp_cdb_length;

	/*
	 * If we received less than FCP_CMND_IU bytes, assume that the frame is
	 * corrupted in some way and drop it. This was seen when jamming the FCTL
	 * fill bytes field.
	 */
	if (seq->payload->dma.len < exp_payload_len) {
		fc_header_t	*fchdr = seq->header->dma.virt;
		ocs_log_debug(ocs, "dropping ox_id %04x with payload length (%zd) less than expected (%zd)\n",
			      ocs_be16toh(fchdr->ox_id), seq->payload->dma.len,
			      exp_payload_len);
		return -1;
	}
	return 0;

}
809 
/*
 * Populate an IO from a received FCP_CMND: task tag (from OX_ID), expected
 * transfer length (FCP_DL), CS_CTL priority, and sequence-initiative state.
 */
static void
ocs_populate_io_fcp_cmd(ocs_io_t *io, fcp_cmnd_iu_t *cmnd, fc_header_t *fchdr, uint8_t sit)
{
	uint32_t	*fcp_dl;
	io->init_task_tag = ocs_be16toh(fchdr->ox_id);
	/* note, tgt_task_tag, hw_tag  set when HW io is allocated */
	/* FCP_DL follows the CDB; uint32_t* arithmetic steps in 4-byte words,
	 * matching additional_fcp_cdb_length's word units */
	fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl));
	fcp_dl += cmnd->additional_fcp_cdb_length;
	io->exp_xfer_len = ocs_be32toh(*fcp_dl);
	io->transferred = 0;

	/* The upper 7 bits of CS_CTL is the frame priority thru the SAN.
	 * Our assertion here is, the priority given to a frame containing
	 * the FCP cmd should be the priority given to ALL frames contained
	 * in that IO. Thus we need to save the incoming CS_CTL here.
	 */
	if (fc_be24toh(fchdr->f_ctl) & FC_FCTL_PRIORITY_ENABLE) {
		io->cs_ctl = fchdr->cs_ctl;
	} else {
		io->cs_ctl = 0;
	}
	io->seq_init = sit;
}
833 
834 static uint32_t
835 ocs_get_flags_fcp_cmd(fcp_cmnd_iu_t *cmnd)
836 {
837 	uint32_t flags = 0;
838 	switch (cmnd->task_attribute) {
839 	case FCP_TASK_ATTR_SIMPLE:
840 		flags |= OCS_SCSI_CMD_SIMPLE;
841 		break;
842 	case FCP_TASK_ATTR_HEAD_OF_QUEUE:
843 		flags |= OCS_SCSI_CMD_HEAD_OF_QUEUE;
844 		break;
845 	case FCP_TASK_ATTR_ORDERED:
846 		flags |= OCS_SCSI_CMD_ORDERED;
847 		break;
848 	case FCP_TASK_ATTR_ACA:
849 		flags |= OCS_SCSI_CMD_ACA;
850 		break;
851 	case FCP_TASK_ATTR_UNTAGGED:
852 		flags |= OCS_SCSI_CMD_UNTAGGED;
853 		break;
854 	}
855 	flags |= (uint32_t)cmnd->command_priority << OCS_SCSI_PRIORITY_SHIFT;
856 	if (cmnd->wrdata)
857 		flags |= OCS_SCSI_CMD_DIR_IN;
858 	if (cmnd->rddata)
859 		flags |= OCS_SCSI_CMD_DIR_OUT;
860 
861 	return flags;
862 }
863 
864 /**
865  * @ingroup unsol
866  * @brief Dispatch unsolicited FCP_CMND frame.
867  *
868  * <h3 class="desc">Description</h3>
869  * Dispatch unsolicited FCP_CMND frame. RQ Pair mode - always
870  * used for RQ Pair mode since first burst is not supported.
871  *
872  * @param node Node that originated the frame.
873  * @param seq Header/payload sequence buffers.
874  *
875  * @return Returns 0 if frame processed and RX buffers cleaned
876  * up appropriately, -1 if frame not handled and RX buffers need
877  * to be returned.
878  */
static int32_t
ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	ocs_t *ocs = node->ocs;
	fc_header_t	*fchdr = seq->header->dma.virt;
	fcp_cmnd_iu_t	*cmnd = NULL;
	ocs_io_t	*io = NULL;
	fc_vm_header_t 	*vhdr;
	uint8_t 	df_ctl;
	uint64_t	lun = UINT64_MAX;
	int32_t		rc = 0;

	ocs_assert(seq->payload, -1);
	cmnd = seq->payload->dma.virt;

	/* perform FCP_CMND validation check(s) */
	if (ocs_validate_fcp_cmd(ocs, seq)) {
		return -1;
	}

	/* decode the 8-byte FCP LUN field; UINT64_MAX signals a decode failure */
	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
	if (lun == UINT64_MAX) {
		return -1;
	}

	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
	if (io == NULL) {
		uint32_t send_frame_capable;

		/* If we have SEND_FRAME capability, then use it to send task set full or busy */
		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
		if ((rc == 0) && send_frame_capable) {
			rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
			if (rc) {
				ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
			}
			return rc;
		}

		ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
		return -1;
	}
	io->hw_priv = seq->hw_priv;

	/* Check if the CMD has vmheader. */
	io->app_id = 0;
	df_ctl = fchdr->df_ctl;
	if (df_ctl & FC_DFCTL_DEVICE_HDR_16_MASK) {
		uint32_t vmhdr_offset = 0;
		/* Presence of VMID. Get the vm header offset. */
		if (df_ctl & FC_DFCTL_ESP_HDR_MASK) {
			vmhdr_offset += FC_DFCTL_ESP_HDR_SIZE;
			ocs_log_err(ocs, "ESP Header present. Fix ESP Size.\n");
		}

		if (df_ctl & FC_DFCTL_NETWORK_HDR_MASK) {
			vmhdr_offset += FC_DFCTL_NETWORK_HDR_SIZE;
		}
		/* VM header sits after the FC header plus any optional headers */
		vhdr = (fc_vm_header_t *) ((char *)fchdr + sizeof(fc_header_t) + vmhdr_offset);
		io->app_id = ocs_be32toh(vhdr->src_vmid);
	}

	/* RQ pair, if we got here, SIT=1 */
	ocs_populate_io_fcp_cmd(io, cmnd, fchdr, TRUE);

	if (cmnd->task_management_flags) {
		/* TMF: route to the task-management dispatcher instead of recv_cmd */
		ocs_dispatch_unsolicited_tmf(io, cmnd->task_management_flags, node, lun);
	} else {
		uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);

		/* can return failure for things like task set full and UAs,
		 * no need to treat as a dropped frame if rc != 0
		 */
		ocs_scsi_recv_cmd(io, lun, cmnd->fcp_cdb,
				  sizeof(cmnd->fcp_cdb) +
				  (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
				  flags);
	}

	/* successfully processed, now return RX buffer to the chip */
	ocs_hw_sequence_free(&ocs->hw, seq);
	return 0;
}
962 
963 /**
964  * @ingroup unsol
965  * @brief Dispatch unsolicited FCP_CMND frame (auto xfer rdy).
966  *
967  * <h3 class="desc">Description</h3>
968  * Dispatch unsolicited FCP_CMND frame that is assisted with auto xfer ready.
969  *
970  * @param node Node that originated the frame.
971  * @param seq Header/payload sequence buffers.
972  *
973  * @return Returns 0 if frame processed and RX buffers cleaned
974  * up appropriately, -1 if frame not handled and RX buffers need
975  * to be returned.
976  */
977 static int32_t
978 ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq)
979 {
980 	ocs_t *ocs = node->ocs;
981 	fc_header_t	*fchdr = seq->header->dma.virt;
982 	fcp_cmnd_iu_t	*cmnd = NULL;
983 	ocs_io_t	*io = NULL;
984 	uint64_t	lun = UINT64_MAX;
985 	int32_t		rc = 0;
986 
987 	ocs_assert(seq->payload, -1);
988 	cmnd = seq->payload->dma.virt;
989 
990 	/* perform FCP_CMND validation check(s) */
991 	if (ocs_validate_fcp_cmd(ocs, seq)) {
992 		return -1;
993 	}
994 
995 	/* make sure first burst or auto xfer_rdy is enabled */
996 	if (!seq->auto_xrdy) {
997 		node_printf(node, "IO is not Auto Xfr Rdy assisted, dropping FCP_CMND\n");
998 		return -1;
999 	}
1000 
1001 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
1002 
1003 	/* TODO should there be a check here for an error? Why do any of the
1004 	 * below if the LUN decode failed? */
1005 	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
1006 	if (io == NULL) {
1007 		uint32_t send_frame_capable;
1008 
1009 		/* If we have SEND_FRAME capability, then use it to send task set full or busy */
1010 		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
1011 		if ((rc == 0) && send_frame_capable) {
1012 			rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
1013 			if (rc) {
1014 				ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
1015 			}
1016 			return rc;
1017 		}
1018 
1019 		ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
1020 		return -1;
1021 	}
1022 	io->hw_priv = seq->hw_priv;
1023 
1024 	/* RQ pair, if we got here, SIT=0 */
1025 	ocs_populate_io_fcp_cmd(io, cmnd, fchdr, FALSE);
1026 
1027 	if (cmnd->task_management_flags) {
1028 		/* first burst command better not be a TMF */
1029 		ocs_log_err(ocs, "TMF flags set 0x%x\n", cmnd->task_management_flags);
1030 		ocs_scsi_io_free(io);
1031 		return -1;
1032 	} else {
1033 		uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);
1034 
1035 		/* activate HW IO */
1036 		ocs_hw_io_activate_port_owned(&ocs->hw, seq->hio);
1037 		io->hio = seq->hio;
1038 		seq->hio->ul_io = io;
1039 		io->tgt_task_tag = seq->hio->indicator;
1040 
1041 		/* Note: Data buffers are received in another call */
1042 		ocs_scsi_recv_cmd_first_burst(io, lun, cmnd->fcp_cdb,
1043 					      sizeof(cmnd->fcp_cdb) +
1044 					      (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
1045 					      flags, NULL, 0);
1046 	}
1047 
1048 	/* FCP_CMND processed, return RX buffer to the chip */
1049 	ocs_hw_sequence_free(&ocs->hw, seq);
1050 	return 0;
1051 }
1052 
1053 /**
1054  * @ingroup unsol
1055  * @brief Dispatch FCP data frames for auto xfer ready.
1056  *
1057  * <h3 class="desc">Description</h3>
1058  * Dispatch unsolicited FCP data frames (auto xfer ready)
1059  * containing sequence initiative transferred (SIT=1).
1060  *
1061  * @param node Node that originated the frame.
1062  * @param seq Header/payload sequence buffers.
1063  *
1064  * @return Returns 0 if frame processed and RX buffers cleaned
1065  * up appropriately, -1 if frame not handled.
1066  */
1067 
1068 static int32_t
1069 ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq)
1070 {
1071 	ocs_t *ocs = node->ocs;
1072 	ocs_hw_t *hw = &ocs->hw;
1073 	ocs_hw_io_t *hio = seq->hio;
1074 	ocs_io_t	*io;
1075 	ocs_dma_t fburst[1];
1076 
1077 	ocs_assert(seq->payload, -1);
1078 	ocs_assert(hio, -1);
1079 
1080 	io = hio->ul_io;
1081 	if (io == NULL) {
1082 		ocs_log_err(ocs, "data received for NULL io, xri=0x%x\n",
1083 			    hio->indicator);
1084 		return -1;
1085 	}
1086 
1087 	/*
1088 	 * We only support data completions for auto xfer ready. Make sure
1089 	 * this is a port owned XRI.
1090 	 */
1091 	if (!ocs_hw_is_io_port_owned(hw, seq->hio)) {
1092 		ocs_log_err(ocs, "data received for host owned XRI, xri=0x%x\n",
1093 			    hio->indicator);
1094 		return -1;
1095 	}
1096 
1097 	/* For error statuses, pass the error to the target back end */
1098 	if (seq->status != OCS_HW_UNSOL_SUCCESS) {
1099 		ocs_log_err(ocs, "data with status 0x%x received, xri=0x%x\n",
1100 			    seq->status, hio->indicator);
1101 
1102 		/*
1103 		 * In this case, there is an existing, in-use HW IO that
1104 		 * first may need to be aborted. Then, the backend will be
1105 		 * notified of the error while waiting for the data.
1106 		 */
1107 		ocs_port_owned_abort(ocs, seq->hio);
1108 
1109 		/*
1110 		 * HW IO has already been allocated and is waiting for data.
1111 		 * Need to tell backend that an error has occurred.
1112 		 */
1113 		ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, OCS_SCSI_FIRST_BURST_ERR, NULL, 0);
1114 		return -1;
1115 	}
1116 
1117 	/* sequence initiative has been transferred */
1118 	io->seq_init = 1;
1119 
1120 	/* convert the array of pointers to the correct type, to send to backend */
1121 	fburst[0] = seq->payload->dma;
1122 
1123 	/* the amount of first burst data was saved as "acculated sequence length" */
1124 	io->transferred = seq->payload->dma.len;
1125 
1126 	if (ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, 0,
1127 					  fburst, io->transferred)) {
1128 		ocs_log_err(ocs, "error passing first burst, xri=0x%x, oxid=0x%x\n",
1129 			    hio->indicator, io->init_task_tag);
1130 	}
1131 
1132 	/* Free the header and all the accumulated payload buffers */
1133 	ocs_hw_sequence_free(&ocs->hw, seq);
1134 	return 0;
1135 }
1136 
1137 /**
1138  * @ingroup unsol
1139  * @brief Handle the callback for the TMF FUNCTION_REJECTED response.
1140  *
1141  * <h3 class="desc">Description</h3>
1142  * Handle the callback of a send TMF FUNCTION_REJECTED response request.
1143  *
1144  * @param io Pointer to the IO context.
1145  * @param scsi_status Status of the response.
1146  * @param flags Callback flags.
1147  * @param arg Callback argument.
1148  *
1149  * @return Returns 0 on success, or a negative error value on failure.
1150  */
1151 
1152 static int32_t
1153 ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg)
1154 {
1155 	ocs_scsi_io_free(io);
1156 	return 0;
1157 }
1158 
1159 /**
1160  * @brief Return next FC frame on node->pend_frames list
1161  *
1162  * The next FC frame on the node->pend_frames list is returned, or NULL
1163  * if the list is empty.
1164  *
1165  * @param pend_list Pending list to be purged.
1166  * @param list_lock Lock that protects pending list.
1167  *
1168  * @return Returns pointer to the next FC frame, or NULL if the pending frame list
1169  * is empty.
1170  */
1171 static ocs_hw_sequence_t *
1172 ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock)
1173 {
1174 	ocs_hw_sequence_t *frame = NULL;
1175 
1176 	ocs_lock(list_lock);
1177 		frame = ocs_list_remove_head(pend_list);
1178 	ocs_unlock(list_lock);
1179 	return frame;
1180 }
1181 
1182 /**
1183  * @brief Process send fcp response frame callback
1184  *
1185  * The function is called when the send FCP response posting has completed. Regardless
1186  * of the outcome, the sequence is freed.
1187  *
1188  * @param arg Pointer to originator frame sequence.
1189  * @param cqe Pointer to completion queue entry.
1190  * @param status Status of operation.
1191  *
1192  * @return None.
1193  */
1194 static void
1195 ocs_sframe_common_send_cb(void *arg, uint8_t *cqe, int32_t status)
1196 {
1197 	ocs_hw_send_frame_context_t *ctx = arg;
1198 	ocs_hw_t *hw = ctx->hw;
1199 
1200 	/* Free WQ completion callback */
1201 	ocs_hw_reqtag_free(hw, ctx->wqcb);
1202 
1203 	/* Free sequence */
1204 	ocs_hw_sequence_free(hw, ctx->seq);
1205 }
1206 
/**
 * @brief Send a frame, common code
 *
 * A frame is sent using SEND_FRAME; the caller supplies R_CTL/INFO/F_CTL/TYPE,
 * and the payload is sent as a single frame. The S_ID/D_ID and OX_ID/RX_ID of
 * the outbound header are taken from the inbound frame's header (with source
 * and destination swapped) so the frame is routed back to the original sender.
 *
 * Memory for the send-frame context and a copy of the payload is carved out of
 * the RQ payload buffer contained in the passed-in sequence; the sequence is
 * freed later by the completion callback (ocs_sframe_common_send_cb).
 *
 * @param node Pointer to node object.
 * @param seq Pointer to sequence object.
 * @param r_ctl R_CTL value to place in FC header.
 * @param info INFO value to place in FC header.
 * @param f_ctl F_CTL value to place in FC header.
 * @param type TYPE value to place in FC header.
 * @param payload Pointer to payload data.
 * @param payload_len Length of payload in bytes.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_sframe_common_send(ocs_node_t *node, ocs_hw_sequence_t *seq, uint8_t r_ctl, uint8_t info, uint32_t f_ctl,
		       uint8_t type, void *payload, uint32_t payload_len)
{
	ocs_t *ocs = node->ocs;
	ocs_hw_t *hw = &ocs->hw;
	ocs_hw_rtn_e rc = 0;
	fc_header_t *behdr = seq->header->dma.virt;
	fc_header_le_t hdr;
	uint32_t s_id = fc_be24toh(behdr->s_id);
	uint32_t d_id = fc_be24toh(behdr->d_id);
	uint16_t ox_id = ocs_be16toh(behdr->ox_id);
	uint16_t rx_id = ocs_be16toh(behdr->rx_id);
	ocs_hw_send_frame_context_t *ctx;

	/* the sequence's RQ payload DMA buffer doubles as a small scratch heap */
	uint32_t heap_size = seq->payload->dma.size;
	uintptr_t heap_phys_base = seq->payload->dma.phys;
	uint8_t *heap_virt_base = seq->payload->dma.virt;
	uint32_t heap_offset = 0;

	/* Build the FC header on the stack (little-endian layout, fc_header_le_t) */
	ocs_memset(&hdr, 0, sizeof(hdr));
	hdr.d_id = s_id;			/* send it back to whomever sent it to us */
	hdr.r_ctl = r_ctl;
	hdr.info = info;
	hdr.s_id = d_id;
	hdr.cs_ctl = 0;
	hdr.f_ctl = f_ctl;
	hdr.type = type;
	hdr.seq_cnt = 0;
	hdr.df_ctl = 0;

	/*
	 * send_frame_seq_id is an atomic, we just let it increment, while storing only
	 * the low 8 bits to hdr->seq_id
	 */
	hdr.seq_id = (uint8_t) ocs_atomic_add_return(&hw->send_frame_seq_id, 1);

	/* reuse the inbound exchange IDs so the frame belongs to the same exchange */
	hdr.rx_id = rx_id;
	hdr.ox_id = ox_id;
	hdr.parameter = 0;

	/* Allocate and fill in the send frame request context from the heap */
	ctx = (void*)(heap_virt_base + heap_offset);
	heap_offset += sizeof(*ctx);
	/* NOTE(review): this bound uses '<' while the payload bound below uses '<=' -- confirm intent */
	ocs_assert(heap_offset < heap_size, -1);
	ocs_memset(ctx, 0, sizeof(*ctx));

	/* Save sequence so the completion callback can free it */
	ctx->seq = seq;

	/* Allocate a response payload DMA buffer from the heap */
	ctx->payload.phys = heap_phys_base + heap_offset;
	ctx->payload.virt = heap_virt_base + heap_offset;
	ctx->payload.size = payload_len;
	ctx->payload.len = payload_len;
	heap_offset += payload_len;
	ocs_assert(heap_offset <= heap_size, -1);

	/* Copy the payload in */
	ocs_memcpy(ctx->payload.virt, payload, payload_len);

	/* Send */
	rc = ocs_hw_send_frame(&ocs->hw, (void*)&hdr, FC_SOFI3, FC_EOFT, &ctx->payload, ctx,
				ocs_sframe_common_send_cb, ctx);
	if (rc) {
		ocs_log_test(ocs, "ocs_hw_send_frame failed: %d\n", rc);
	}

	return rc ? -1 : 0;
}
1297 
1298 /**
1299  * @brief Send FCP response using SEND_FRAME
1300  *
1301  * The FCP response is send using the SEND_FRAME function.
1302  *
1303  * @param node Pointer to node object.
1304  * @param seq Pointer to inbound sequence.
1305  * @param rsp Pointer to response data.
1306  * @param rsp_len Length of response data, in bytes.
1307  *
1308  * @return Returns 0 on success, or a negative error code value on failure.
1309  */
1310 static int32_t
1311 ocs_sframe_send_fcp_rsp(ocs_node_t *node, ocs_hw_sequence_t *seq, void *rsp, uint32_t rsp_len)
1312 {
1313 	return ocs_sframe_common_send(node, seq,
1314 				      FC_RCTL_FC4_DATA,
1315 				      FC_RCTL_INFO_CMD_STATUS,
1316 				      FC_FCTL_EXCHANGE_RESPONDER |
1317 					      FC_FCTL_LAST_SEQUENCE |
1318 					      FC_FCTL_END_SEQUENCE |
1319 					      FC_FCTL_SEQUENCE_INITIATIVE,
1320 				      FC_TYPE_FCP,
1321 				      rsp, rsp_len);
1322 }
1323 
/**
 * @brief Send task set full response
 *
 * Return a task set full or busy response using send frame.
 * SCSI_STATUS_BUSY is reported when the node has no active IOs;
 * SCSI_STATUS_TASK_SET_FULL is reported when other commands from this
 * node are already active.
 *
 * @param node Pointer to node object.
 * @param seq Pointer to originator frame sequence.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	fcp_rsp_iu_t fcprsp;
	fcp_cmnd_iu_t *fcpcmd = seq->payload->dma.virt;
	uint32_t *fcp_dl_ptr;
	uint32_t fcp_dl;
	int32_t rc = 0;

	/* extract FCP_DL from FCP command: it follows the (possibly extended) CDB */
	fcp_dl_ptr = (uint32_t*)(&(fcpcmd->fcp_cdb_and_dl));
	fcp_dl_ptr += fcpcmd->additional_fcp_cdb_length;
	fcp_dl = ocs_be32toh(*fcp_dl_ptr);

	/* construct task set full or busy response */
	ocs_memset(&fcprsp, 0, sizeof(fcprsp));
	ocs_lock(&node->active_ios_lock);
		fcprsp.scsi_status = ocs_list_empty(&node->active_ios) ? SCSI_STATUS_BUSY : SCSI_STATUS_TASK_SET_FULL;
	ocs_unlock(&node->active_ios_lock);
	/* report the whole FCP_DL as residual (no data was moved);
	 * NOTE(review): fcp_dl is host order here and the store is via a
	 * uint32_t cast onto fcp_resid -- confirm wire byte order/aliasing intent */
	*((uint32_t*)&fcprsp.fcp_resid) = fcp_dl;

	/* send it using send_frame; the trailing variable-length data[] is excluded */
	rc = ocs_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp) - sizeof(fcprsp.data));
	if (rc) {
		ocs_log_test(node->ocs, "ocs_sframe_send_fcp_rsp failed: %d\n", rc);
	}
	return rc;
}
1362 
1363 /**
1364  * @brief Send BA_ACC using sent frame
1365  *
1366  * A BA_ACC is sent using SEND_FRAME
1367  *
1368  * @param node Pointer to node object.
1369  * @param seq Pointer to originator frame sequence.
1370  *
1371  * @return Returns 0 on success, or a negative error code value on failure.
1372  */
1373 int32_t
1374 ocs_sframe_send_bls_acc(ocs_node_t *node,  ocs_hw_sequence_t *seq)
1375 {
1376 	fc_header_t *behdr = seq->header->dma.virt;
1377 	uint16_t ox_id = ocs_be16toh(behdr->ox_id);
1378 	uint16_t rx_id = ocs_be16toh(behdr->rx_id);
1379 	fc_ba_acc_payload_t acc = {0};
1380 
1381 	acc.ox_id = ocs_htobe16(ox_id);
1382 	acc.rx_id = ocs_htobe16(rx_id);
1383 	acc.low_seq_cnt = UINT16_MAX;
1384 	acc.high_seq_cnt = UINT16_MAX;
1385 
1386 	return ocs_sframe_common_send(node, seq,
1387 				      FC_RCTL_BLS,
1388 				      FC_RCTL_INFO_UNSOL_DATA,
1389 				      FC_FCTL_EXCHANGE_RESPONDER |
1390 					      FC_FCTL_LAST_SEQUENCE |
1391 					      FC_FCTL_END_SEQUENCE,
1392 				      FC_TYPE_BASIC_LINK,
1393 				      &acc, sizeof(acc));
1394 }
1395