xref: /linux/drivers/scsi/qedf/qedf_io.c (revision db10cb9b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  QLogic FCoE Offload Driver
4  *  Copyright (c) 2016-2018 Cavium Inc.
5  */
6 #include <linux/spinlock.h>
7 #include <linux/vmalloc.h>
8 #include "qedf.h"
9 #include <scsi/scsi_tcq.h>
10 
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
12 	unsigned int timer_msec)
13 {
14 	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
15 	    msecs_to_jiffies(timer_msec));
16 }
17 
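/*
 * Delayed work handler invoked when a command's timeout_work fires.
 * Handles timed out ABTS, ELS and sequence cleanup requests; any other
 * command type only gets a log message.
 */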
18 static void qedf_cmd_timeout(struct work_struct *work)
19 {
20 
21 	struct qedf_ioreq *io_req =
22 	    container_of(work, struct qedf_ioreq, timeout_work.work);
23 	struct qedf_ctx *qedf;
24 	struct qedf_rport *fcport;
25 
26 	fcport = io_req->fcport;
27 	if (io_req->fcport == NULL) {
28 		QEDF_INFO(NULL, QEDF_LOG_IO,  "fcport is NULL.\n");
29 		return;
30 	}
31 
32 	qedf = fcport->qedf;
33 
34 	switch (io_req->cmd_type) {
35 	case QEDF_ABTS:
36 		if (qedf == NULL) {
37 			QEDF_INFO(NULL, QEDF_LOG_IO,
38 				  "qedf is NULL for ABTS xid=0x%x.\n",
39 				  io_req->xid);
40 			return;
41 		}
42 
43 		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
44 		    io_req->xid);
45 		/* Cleanup timed out ABTS */
46 		qedf_initiate_cleanup(io_req, true);
47 		complete(&io_req->abts_done);
48 
49 		/*
50 		 * Need to call kref_put for reference taken when initiate_abts
51 		 * was called since abts_compl won't be called now that we've
52 		 * cleaned up the task.
53 		 */
54 		kref_put(&io_req->refcount, qedf_release_cmd);
55 
56 		/* Clear the in-abort bit now that we're done with the command */
57 		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
58 
59 		/*
60 		 * Now that the original I/O and the ABTS are complete, see
61 		 * if we need to reconnect to the target.
62 		 */
63 		qedf_restart_rport(fcport);
64 		break;
65 	case QEDF_ELS:
66 		if (!qedf) {
67 			QEDF_INFO(NULL, QEDF_LOG_IO,
68 				  "qedf is NULL for ELS xid=0x%x.\n",
69 				  io_req->xid);
70 			return;
71 		}
72 		/* ELS request no longer outstanding since it timed out */
73 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
74 
75 		kref_get(&io_req->refcount);
76 		/*
77 		 * Don't attempt to clean an ELS timeout as any subsequent
78 		 * ABTS or cleanup requests just hang.  For now just free
79 		 * the resources of the original I/O and the RRQ.
80 		 */
81 		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
82 			  io_req->xid);
83 		qedf_initiate_cleanup(io_req, true);
84 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
85 		/* Call callback function to complete command */
86 		if (io_req->cb_func && io_req->cb_arg) {
87 			io_req->cb_func(io_req->cb_arg);
88 			io_req->cb_arg = NULL;
89 		}
90 		kref_put(&io_req->refcount, qedf_release_cmd);
91 		break;
92 	case QEDF_SEQ_CLEANUP:
93 		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
94 		    "xid=0x%x.\n", io_req->xid);
95 		qedf_initiate_cleanup(io_req, true);
96 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
97 		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
98 		break;
99 	default:
100 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
101 			  "Hit default case, xid=0x%x.\n", io_req->xid);
102 		break;
103 	}
104 }
105 
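/*
 * Tear down the command manager: free the per-command BD tables and the
 * io_bdt pool, then the per-command sense buffers and task parameter
 * structures, and finally the command manager itself.
 */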
106 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
107 {
108 	struct io_bdt *bdt_info;
109 	struct qedf_ctx *qedf = cmgr->qedf;
110 	size_t bd_tbl_sz;
111 	u16 min_xid = 0;
112 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
113 	int num_ios;
114 	int i;
115 	struct qedf_ioreq *io_req;
116 
117 	num_ios = max_xid - min_xid + 1;
118 
119 	/* Free fcoe_bdt_ctx structures */
120 	if (!cmgr->io_bdt_pool) {
121 		QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
122 		goto free_cmd_pool;
123 	}
124 
125 	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
126 	for (i = 0; i < num_ios; i++) {
127 		bdt_info = cmgr->io_bdt_pool[i];
128 		if (bdt_info->bd_tbl) {
129 			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
130 			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
131 			bdt_info->bd_tbl = NULL;
132 		}
133 	}
134 
135 	/* Destroy io_bdt pool */
136 	for (i = 0; i < num_ios; i++) {
137 		kfree(cmgr->io_bdt_pool[i]);
138 		cmgr->io_bdt_pool[i] = NULL;
139 	}
140 
141 	kfree(cmgr->io_bdt_pool);
142 	cmgr->io_bdt_pool = NULL;
143 
144 free_cmd_pool:
145 
146 	for (i = 0; i < num_ios; i++) {
147 		io_req = &cmgr->cmds[i];
148 		kfree(io_req->sgl_task_params);
149 		kfree(io_req->task_params);
150 		/* Make sure we free per command sense buffer */
151 		if (io_req->sense_buffer)
152 			dma_free_coherent(&qedf->pdev->dev,
153 			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
154 			    io_req->sense_buffer_dma);
155 		cancel_delayed_work_sync(&io_req->rrq_work);
156 	}
157 
158 	/* Free command manager itself */
159 	vfree(cmgr);
160 }
161 
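/*
 * Delayed work handler that sends an RRQ (Reinstate Recovery Qualifier)
 * ELS for this request and marks it RRQ-active.
 */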
162 static void qedf_handle_rrq(struct work_struct *work)
163 {
164 	struct qedf_ioreq *io_req =
165 	    container_of(work, struct qedf_ioreq, rrq_work.work);
166 
167 	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
168 	qedf_send_rrq(io_req);
169 
170 }
171 
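/*
 * Allocate the command manager: one qedf_ioreq per FCoE task (xid), each
 * with a DMA-able sense buffer, task parameters and SGL parameters, plus
 * a pool of io_bdt descriptors with DMA-able BD tables.  Returns NULL on
 * any allocation failure after freeing what was already allocated.
 */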
172 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
173 {
174 	struct qedf_cmd_mgr *cmgr;
175 	struct io_bdt *bdt_info;
176 	struct qedf_ioreq *io_req;
177 	u16 xid;
178 	int i;
179 	int num_ios;
180 	u16 min_xid = 0;
181 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
182 
183 	/* Make sure num_queues is already set before calling this function */
184 	if (!qedf->num_queues) {
185 		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
186 		return NULL;
187 	}
188 
189 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
190 		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
191 			   "max_xid 0x%x.\n", min_xid, max_xid);
192 		return NULL;
193 	}
194 
195 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
196 		   "0x%x.\n", min_xid, max_xid);
197 
198 	num_ios = max_xid - min_xid + 1;
199 
200 	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
201 	if (!cmgr) {
202 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
203 		return NULL;
204 	}
205 
206 	cmgr->qedf = qedf;
207 	spin_lock_init(&cmgr->lock);
208 
209 	/*
210 	 * Initialize I/O request fields.
211 	 */
212 	xid = 0;
213 
214 	for (i = 0; i < num_ios; i++) {
215 		io_req = &cmgr->cmds[i];
216 		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
217 
218 		io_req->xid = xid++;
219 
220 		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
221 
222 		/* Allocate DMA memory to hold sense buffer */
223 		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
224 		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
225 		    GFP_KERNEL);
226 		if (!io_req->sense_buffer) {
227 			QEDF_ERR(&qedf->dbg_ctx,
228 				 "Failed to alloc sense buffer.\n");
229 			goto mem_err;
230 		}
231 
232 		/* Allocate task parameters to pass to f/w init functions */
233 		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
234 					      GFP_KERNEL);
235 		if (!io_req->task_params) {
236 			QEDF_ERR(&(qedf->dbg_ctx),
237 				 "Failed to allocate task_params for xid=0x%x\n",
238 				 i);
239 			goto mem_err;
240 		}
241 
242 		/*
243 		 * Allocate scatter/gather list info to pass to f/w init
244 		 * functions.
245 		 */
246 		io_req->sgl_task_params = kzalloc(
247 		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
248 		if (!io_req->sgl_task_params) {
249 			QEDF_ERR(&(qedf->dbg_ctx),
250 				 "Failed to allocate sgl_task_params for xid=0x%x\n",
251 				 i);
252 			goto mem_err;
253 		}
254 	}
255 
256 	/* Allocate pool of io_bdts - one for each qedf_ioreq */
257 	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
258 	    GFP_KERNEL);
259 
260 	if (!cmgr->io_bdt_pool) {
261 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
262 		goto mem_err;
263 	}
264 
265 	for (i = 0; i < num_ios; i++) {
266 		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
267 		    GFP_KERNEL);
268 		if (!cmgr->io_bdt_pool[i]) {
269 			QEDF_WARN(&(qedf->dbg_ctx),
270 				  "Failed to alloc io_bdt_pool[%d].\n", i);
271 			goto mem_err;
272 		}
273 	}
274 
275 	for (i = 0; i < num_ios; i++) {
276 		bdt_info = cmgr->io_bdt_pool[i];
277 		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
278 		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
279 		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
280 		if (!bdt_info->bd_tbl) {
281 			QEDF_WARN(&(qedf->dbg_ctx),
282 				  "Failed to alloc bdt_tbl[%d].\n", i);
283 			goto mem_err;
284 		}
285 	}
286 	atomic_set(&cmgr->free_list_cnt, num_ios);
287 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
288 	    "cmgr->free_list_cnt=%d.\n",
289 	    atomic_read(&cmgr->free_list_cnt));
290 
291 	return cmgr;
292 
293 mem_err:
294 	qedf_cmd_mgr_free(cmgr);
295 	return NULL;
296 }
297 
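/*
 * Allocate an io_req for the given rport and command type.  Enforces the
 * per-connection SQE and active I/O limits and the global reserved TID
 * count, then searches the command array round-robin for a free entry.
 */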
298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
299 {
300 	struct qedf_ctx *qedf = fcport->qedf;
301 	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
302 	struct qedf_ioreq *io_req = NULL;
303 	struct io_bdt *bd_tbl;
304 	u16 xid;
305 	uint32_t free_sqes;
306 	int i;
307 	unsigned long flags;
308 
309 	free_sqes = atomic_read(&fcport->free_sqes);
310 
311 	if (!free_sqes) {
312 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
313 		    "Returning NULL, free_sqes=%d.\n ",
314 		    free_sqes);
315 		goto out_failed;
316 	}
317 
318 	/* Limit the number of outstanding R/W tasks */
319 	if ((atomic_read(&fcport->num_active_ios) >=
320 	    NUM_RW_TASKS_PER_CONNECTION)) {
321 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
322 		    "Returning NULL, num_active_ios=%d.\n",
323 		    atomic_read(&fcport->num_active_ios));
324 		goto out_failed;
325 	}
326 
327 	/* Keep some global TIDs in reserve for certain tasks */
328 	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
329 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
330 		    "Returning NULL, free_list_cnt=%d.\n",
331 		    atomic_read(&cmd_mgr->free_list_cnt));
332 		goto out_failed;
333 	}
334 
335 	spin_lock_irqsave(&cmd_mgr->lock, flags);
336 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
337 		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
338 		cmd_mgr->idx++;
339 		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
340 			cmd_mgr->idx = 0;
341 
342 		/* Check to make sure command was previously freed */
343 		if (!io_req->alloc)
344 			break;
345 	}
346 
347 	if (i == FCOE_PARAMS_NUM_TASKS) {
348 		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
349 		goto out_failed;
350 	}
351 
352 	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
353 		QEDF_ERR(&qedf->dbg_ctx,
354 			 "io_req found to be dirty ox_id = 0x%x.\n",
355 			 io_req->xid);
356 
357 	/* Clear any flags now that we've reallocated the xid */
358 	io_req->flags = 0;
359 	io_req->alloc = 1;
360 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
361 
362 	atomic_inc(&fcport->num_active_ios);
363 	atomic_dec(&fcport->free_sqes);
364 	xid = io_req->xid;
365 	atomic_dec(&cmd_mgr->free_list_cnt);
366 
367 	io_req->cmd_mgr = cmd_mgr;
368 	io_req->fcport = fcport;
369 
370 	/* Clear any stale sc_cmd back pointer */
371 	io_req->sc_cmd = NULL;
372 	io_req->lun = -1;
373 
374 	/* Hold the io_req against deletion */
375 	kref_init(&io_req->refcount);	/* ID: 001 */
376 	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
377 
378 	/* Bind io_bdt for this io_req */
379 	/* Have a static link between io_req and io_bdt_pool */
380 	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
381 	if (bd_tbl == NULL) {
382 		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
383 		kref_put(&io_req->refcount, qedf_release_cmd);
384 		goto out_failed;
385 	}
386 	bd_tbl->io_req = io_req;
387 	io_req->cmd_type = cmd_type;
388 	io_req->tm_flags = 0;
389 
390 	/* Reset sequence offset data */
391 	io_req->rx_buf_off = 0;
392 	io_req->tx_buf_off = 0;
393 	io_req->rx_id = 0xffff; /* No RX_ID assigned yet */
394 
395 	return io_req;
396 
397 out_failed:
398 	/* Record failure for stats and return NULL to caller */
399 	qedf->alloc_failures++;
400 	return NULL;
401 }
402 
403 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
404 {
405 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
406 	struct qedf_ctx *qedf = io_req->fcport->qedf;
407 	uint64_t sz = sizeof(struct scsi_sge);
408 
409 	/* Free middle path request/response BDs and buffers */
410 	if (mp_req->mp_req_bd) {
411 		dma_free_coherent(&qedf->pdev->dev, sz,
412 		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
413 		mp_req->mp_req_bd = NULL;
414 	}
415 	if (mp_req->mp_resp_bd) {
416 		dma_free_coherent(&qedf->pdev->dev, sz,
417 		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
418 		mp_req->mp_resp_bd = NULL;
419 	}
420 	if (mp_req->req_buf) {
421 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
422 		    mp_req->req_buf, mp_req->req_buf_dma);
423 		mp_req->req_buf = NULL;
424 	}
425 	if (mp_req->resp_buf) {
426 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
427 		    mp_req->resp_buf, mp_req->resp_buf_dma);
428 		mp_req->resp_buf = NULL;
429 	}
430 }
431 
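/*
 * kref release callback for an io_req.  Frees middle path resources for
 * ELS/TMF commands, returns the entry to the command manager free pool
 * and drops the per-rport active I/O accounting.
 */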
432 void qedf_release_cmd(struct kref *ref)
433 {
434 	struct qedf_ioreq *io_req =
435 	    container_of(ref, struct qedf_ioreq, refcount);
436 	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
437 	struct qedf_rport *fcport = io_req->fcport;
438 	unsigned long flags;
439 
440 	if (io_req->cmd_type == QEDF_SCSI_CMD) {
441 		QEDF_WARN(&fcport->qedf->dbg_ctx,
442 			  "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
443 			  io_req, io_req->xid);
444 		WARN_ON(io_req->sc_cmd);
445 	}
446 
447 	if (io_req->cmd_type == QEDF_ELS ||
448 	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
449 		qedf_free_mp_resc(io_req);
450 
451 	atomic_inc(&cmd_mgr->free_list_cnt);
452 	atomic_dec(&fcport->num_active_ios);
453 	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
454 	if (atomic_read(&fcport->num_active_ios) < 0) {
455 		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
456 		WARN_ON(1);
457 	}
458 
459 	/* Increment task retry identifier now that the request is released */
460 	io_req->task_retry_identifier++;
461 	io_req->fcport = NULL;
462 
463 	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
464 	io_req->cpu = 0;
465 	spin_lock_irqsave(&cmd_mgr->lock, flags);
466 	io_req->fcport = NULL;
467 	io_req->alloc = 0;
468 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
469 }
470 
471 static int qedf_map_sg(struct qedf_ioreq *io_req)
472 {
473 	struct scsi_cmnd *sc = io_req->sc_cmd;
474 	struct Scsi_Host *host = sc->device->host;
475 	struct fc_lport *lport = shost_priv(host);
476 	struct qedf_ctx *qedf = lport_priv(lport);
477 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
478 	struct scatterlist *sg;
479 	int byte_count = 0;
480 	int sg_count = 0;
481 	int bd_count = 0;
482 	u32 sg_len;
483 	u64 addr;
484 	int i = 0;
485 
486 	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
487 	    scsi_sg_count(sc), sc->sc_data_direction);
488 	sg = scsi_sglist(sc);
489 
490 	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
491 
492 	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
493 		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
494 
495 	scsi_for_each_sg(sc, sg, sg_count, i) {
496 		sg_len = (u32)sg_dma_len(sg);
497 		addr = (u64)sg_dma_address(sg);
498 
499 		/*
500 		 * For intermediate s/g elements, check whether the element spans
501 		 * less than a full page.  Only relevant for writes with more than
502 		 * 8 scatter/gather elements (otherwise the fast SGE type was set).
503 		 */
504 		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
505 		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
506 			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
507 
508 		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
509 		bd[bd_count].sge_addr.hi  = cpu_to_le32(U64_HI(addr));
510 		bd[bd_count].sge_len = cpu_to_le32(sg_len);
511 
512 		bd_count++;
513 		byte_count += sg_len;
514 	}
515 
516 	/* If neither FAST nor SLOW was chosen above, default to FAST */
517 	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
518 		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
519 
520 	if (byte_count != scsi_bufflen(sc))
521 		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
522 			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
523 			   scsi_bufflen(sc), io_req->xid);
524 
525 	return bd_count;
526 }
527 
528 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
529 {
530 	struct scsi_cmnd *sc = io_req->sc_cmd;
531 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
532 	int bd_count;
533 
534 	if (scsi_sg_count(sc)) {
535 		bd_count = qedf_map_sg(io_req);
536 		if (bd_count == 0)
537 			return -ENOMEM;
538 	} else {
539 		bd_count = 0;
540 		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
541 		bd[0].sge_len = 0;
542 	}
543 	io_req->bd_tbl->bd_valid = bd_count;
544 
545 	return 0;
546 }
547 
548 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
549 				  struct fcp_cmnd *fcp_cmnd)
550 {
551 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
552 
553 	/* fcp_cmnd is 32 bytes */
554 	memset(fcp_cmnd, 0, FCP_CMND_LEN);
555 
556 	/* 8 bytes: SCSI LUN info */
557 	int_to_scsilun(sc_cmd->device->lun,
558 			(struct scsi_lun *)&fcp_cmnd->fc_lun);
559 
560 	/* 4 bytes: flag info */
561 	fcp_cmnd->fc_pri_ta = 0;
562 	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
563 	fcp_cmnd->fc_flags = io_req->io_req_flags;
564 	fcp_cmnd->fc_cmdref = 0;
565 
566 	/* Populate data direction */
567 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
568 		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
569 	} else {
570 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
571 			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
572 		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
573 			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
574 	}
575 
576 	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
577 
578 	/* 16 bytes: CDB information */
579 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
580 		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
581 
582 	/* 4 bytes: FCP data length */
583 	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
584 }
585 
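/*
 * Initialize the firmware task context and SQE for a SCSI command: pick
 * the task type from the DMA direction, fill in the task and SGL
 * parameters, build the big-endian FCP_CMND and hand everything to
 * init_initiator_rw_fcoe_task().
 */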
586 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
587 	struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
588 	struct fcoe_wqe *sqe)
589 {
590 	enum fcoe_task_type task_type;
591 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
592 	struct io_bdt *bd_tbl = io_req->bd_tbl;
593 	u8 fcp_cmnd[32];
594 	u32 tmp_fcp_cmnd[8];
595 	int bd_count = 0;
596 	struct qedf_ctx *qedf = fcport->qedf;
597 	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
598 	struct regpair sense_data_buffer_phys_addr;
599 	u32 tx_io_size = 0;
600 	u32 rx_io_size = 0;
601 	int i, cnt;
602 
603 	/* Note init_initiator_rw_fcoe_task memsets the task context */
604 	io_req->task = task_ctx;
605 	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
606 	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
607 	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
608 
609 	/* Set task type based on the DMA direction of the command */
610 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
611 		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
612 	} else {
613 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
614 			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
615 			tx_io_size = io_req->data_xfer_len;
616 		} else {
617 			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
618 			rx_io_size = io_req->data_xfer_len;
619 		}
620 	}
621 
622 	/* Setup the fields for fcoe_task_params */
623 	io_req->task_params->context = task_ctx;
624 	io_req->task_params->sqe = sqe;
625 	io_req->task_params->task_type = task_type;
626 	io_req->task_params->tx_io_size = tx_io_size;
627 	io_req->task_params->rx_io_size = rx_io_size;
628 	io_req->task_params->conn_cid = fcport->fw_cid;
629 	io_req->task_params->itid = io_req->xid;
630 	io_req->task_params->cq_rss_number = cq_idx;
631 	io_req->task_params->is_tape_device = fcport->dev_type;
632 
633 	/* Fill in information for scatter/gather list */
634 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
635 		bd_count = bd_tbl->bd_valid;
636 		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
637 		io_req->sgl_task_params->sgl_phys_addr.lo =
638 			U64_LO(bd_tbl->bd_tbl_dma);
639 		io_req->sgl_task_params->sgl_phys_addr.hi =
640 			U64_HI(bd_tbl->bd_tbl_dma);
641 		io_req->sgl_task_params->num_sges = bd_count;
642 		io_req->sgl_task_params->total_buffer_size =
643 		    scsi_bufflen(io_req->sc_cmd);
644 		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
645 			io_req->sgl_task_params->small_mid_sge = 1;
646 		else
647 			io_req->sgl_task_params->small_mid_sge = 0;
648 	}
649 
650 	/* Fill in physical address of sense buffer */
651 	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
652 	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
653 
654 	/* fill FCP_CMND IU */
655 	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
656 
657 	/* Swap fcp_cmnd since FC is big endian */
658 	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
659 	for (i = 0; i < cnt; i++) {
660 		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
661 	}
662 	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
663 
664 	init_initiator_rw_fcoe_task(io_req->task_params,
665 				    io_req->sgl_task_params,
666 				    sense_data_buffer_phys_addr,
667 				    io_req->task_retry_identifier, fcp_cmnd);
668 
669 	/* Increment SGL type counters */
670 	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
671 		qedf->slow_sge_ios++;
672 	else
673 		qedf->fast_sge_ios++;
674 }
675 
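/*
 * Initialize a middle path (ELS or task management) task: build the FC
 * header parameters and single-entry request/response SGLs and hand them
 * to init_initiator_midpath_unsolicited_fcoe_task().
 */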
676 void qedf_init_mp_task(struct qedf_ioreq *io_req,
677 	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
678 {
679 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
680 	struct qedf_rport *fcport = io_req->fcport;
681 	struct qedf_ctx *qedf = io_req->fcport->qedf;
682 	struct fc_frame_header *fc_hdr;
683 	struct fcoe_tx_mid_path_params task_fc_hdr;
684 	struct scsi_sgl_task_params tx_sgl_task_params;
685 	struct scsi_sgl_task_params rx_sgl_task_params;
686 
687 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
688 		  "Initializing MP task for cmd_type=%d\n",
689 		  io_req->cmd_type);
690 
691 	qedf->control_requests++;
692 
693 	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
694 	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
695 	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
696 	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
697 
698 	/* Setup the task from io_req for easy reference */
699 	io_req->task = task_ctx;
700 
701 	/* Setup the fields for fcoe_task_params */
702 	io_req->task_params->context = task_ctx;
703 	io_req->task_params->sqe = sqe;
704 	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
705 	io_req->task_params->tx_io_size = io_req->data_xfer_len;
706 	/* rx_io_size tells the f/w how large a response buffer we have */
707 	io_req->task_params->rx_io_size = PAGE_SIZE;
708 	io_req->task_params->conn_cid = fcport->fw_cid;
709 	io_req->task_params->itid = io_req->xid;
710 	/* Return middle path commands on CQ 0 */
711 	io_req->task_params->cq_rss_number = 0;
712 	io_req->task_params->is_tape_device = fcport->dev_type;
713 
714 	fc_hdr = &(mp_req->req_fc_hdr);
715 	/* Set OX_ID and RX_ID based on driver task id */
716 	fc_hdr->fh_ox_id = io_req->xid;
717 	fc_hdr->fh_rx_id = htons(0xffff);
718 
719 	/* Set up FC header information */
720 	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
721 	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
722 	task_fc_hdr.type = fc_hdr->fh_type;
723 	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
724 	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
725 	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
726 	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
727 
728 	/* Set up s/g list parameters for request buffer */
729 	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
730 	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
731 	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
732 	tx_sgl_task_params.num_sges = 1;
733 	/* Request buffer size is the full transfer length */
734 	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
735 	tx_sgl_task_params.small_mid_sge = 0;
736 
737 	/* Set up s/g list parameters for response buffer */
738 	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
739 	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
740 	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
741 	rx_sgl_task_params.num_sges = 1;
742 	/* Set PAGE_SIZE for now since sg element is that size ??? */
743 	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
744 	rx_sgl_task_params.small_mid_sge = 0;
745 
746 
747 	/*
748 	 * The last argument is 0 because the previous code did not request
749 	 * the FC header information.
750 	 */
751 	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
752 						     &task_fc_hdr,
753 						     &tx_sgl_task_params,
754 						     &rx_sgl_task_params, 0);
755 }
756 
757 /* Presumed that fcport->rport_lock is held */
758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
759 {
760 	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
761 	u16 rval;
762 
763 	rval = fcport->sq_prod_idx;
764 
765 	/* Adjust ring index */
766 	fcport->sq_prod_idx++;
767 	fcport->fw_sq_prod_idx++;
768 	if (fcport->sq_prod_idx == total_sqe)
769 		fcport->sq_prod_idx = 0;
770 
771 	return rval;
772 }
773 
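/*
 * Notify the firmware of new SQEs by writing the doorbell data, including
 * the current firmware SQ producer index, to the rport's doorbell address.
 */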
774 void qedf_ring_doorbell(struct qedf_rport *fcport)
775 {
776 	struct fcoe_db_data dbell = { 0 };
777 
778 	dbell.agg_flags = 0;
779 
780 	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
781 	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
782 	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
783 	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
784 
785 	dbell.sq_prod = fcport->fw_sq_prod_idx;
786 	/* wmb makes sure that the BDs data is updated before updating the
787 	 * producer, otherwise FW may read old data from the BDs.
788 	 */
789 	wmb();
790 	barrier();
791 	writel(*(u32 *)&dbell, fcport->p_doorbell);
792 	/*
793 	 * Fence required to flush the write combined buffer, since another
794 	 * CPU may write to the same doorbell address and data may be lost
795 	 * due to relaxed order nature of write combined bar.
796 	 */
797 	wmb();
798 }
799 
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
801 			  int8_t direction)
802 {
803 	struct qedf_ctx *qedf = fcport->qedf;
804 	struct qedf_io_log *io_log;
805 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
806 	unsigned long flags;
807 
808 	spin_lock_irqsave(&qedf->io_trace_lock, flags);
809 
810 	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
811 	io_log->direction = direction;
812 	io_log->task_id = io_req->xid;
813 	io_log->port_id = fcport->rdata->ids.port_id;
814 	io_log->lun = sc_cmd->device->lun;
815 	io_log->op = sc_cmd->cmnd[0];
816 	io_log->lba[0] = sc_cmd->cmnd[2];
817 	io_log->lba[1] = sc_cmd->cmnd[3];
818 	io_log->lba[2] = sc_cmd->cmnd[4];
819 	io_log->lba[3] = sc_cmd->cmnd[5];
820 	io_log->bufflen = scsi_bufflen(sc_cmd);
821 	io_log->sg_count = scsi_sg_count(sc_cmd);
822 	io_log->result = sc_cmd->result;
823 	io_log->jiffies = jiffies;
824 	io_log->refcount = kref_read(&io_req->refcount);
825 
826 	if (direction == QEDF_IO_TRACE_REQ) {
827 		/* For requests we only care about the submission CPU */
828 		io_log->req_cpu = io_req->cpu;
829 		io_log->int_cpu = 0;
830 		io_log->rsp_cpu = 0;
831 	} else if (direction == QEDF_IO_TRACE_RSP) {
832 		io_log->req_cpu = io_req->cpu;
833 		io_log->int_cpu = io_req->int_cpu;
834 		io_log->rsp_cpu = smp_processor_id();
835 	}
836 
837 	io_log->sge_type = io_req->sge_type;
838 
839 	qedf->io_trace_idx++;
840 	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
841 		qedf->io_trace_idx = 0;
842 
843 	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
844 }
845 
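/*
 * Post a SCSI command to the firmware: build the BD list from the scatter
 * list, fill a free SQE and task context and ring the doorbell.  Called
 * with fcport->rport_lock held by qedf_queuecommand().
 */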
846 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
847 {
848 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
849 	struct Scsi_Host *host = sc_cmd->device->host;
850 	struct fc_lport *lport = shost_priv(host);
851 	struct qedf_ctx *qedf = lport_priv(lport);
852 	struct fcoe_task_context *task_ctx;
853 	u16 xid;
854 	struct fcoe_wqe *sqe;
855 	u16 sqe_idx;
856 
857 	/* Initialize the rest of the io_req fields */
858 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
859 	qedf_priv(sc_cmd)->io_req = io_req;
860 	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
861 
862 	/* Record which cpu this request is associated with */
863 	io_req->cpu = smp_processor_id();
864 
865 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
866 		io_req->io_req_flags = QEDF_READ;
867 		qedf->input_requests++;
868 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
869 		io_req->io_req_flags = QEDF_WRITE;
870 		qedf->output_requests++;
871 	} else {
872 		io_req->io_req_flags = 0;
873 		qedf->control_requests++;
874 	}
875 
876 	xid = io_req->xid;
877 
878 	/* Build buffer descriptor list for firmware from sg list */
879 	if (qedf_build_bd_list_from_sg(io_req)) {
880 		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
881 		/* Release cmd will release io_req, but sc_cmd is assigned */
882 		io_req->sc_cmd = NULL;
883 		kref_put(&io_req->refcount, qedf_release_cmd);
884 		return -EAGAIN;
885 	}
886 
887 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
888 	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
889 		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
890 		/* Release cmd will release io_req, but sc_cmd is assigned */
891 		io_req->sc_cmd = NULL;
892 		kref_put(&io_req->refcount, qedf_release_cmd);
893 		return -EINVAL;
894 	}
895 
896 	/* Record LUN number for later use if we need it */
897 	io_req->lun = (int)sc_cmd->device->lun;
898 
899 	/* Obtain free SQE */
900 	sqe_idx = qedf_get_sqe_idx(fcport);
901 	sqe = &fcport->sq[sqe_idx];
902 	memset(sqe, 0, sizeof(struct fcoe_wqe));
903 
904 	/* Get the task context */
905 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
906 	if (!task_ctx) {
907 		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
908 			   xid);
909 		/* Release cmd will release io_req, but sc_cmd is assigned */
910 		io_req->sc_cmd = NULL;
911 		kref_put(&io_req->refcount, qedf_release_cmd);
912 		return -EINVAL;
913 	}
914 
915 	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
916 
917 	/* Ring doorbell */
918 	qedf_ring_doorbell(fcport);
919 
920 	/* Set that command is with the firmware now */
921 	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
922 
923 	if (qedf_io_tracing && io_req->sc_cmd)
924 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
925 
926 	return 0;
927 }
928 
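/*
 * SCSI midlayer queuecommand entry point.  Validates the adapter, link and
 * session state, allocates an io_req for the command and posts it to the
 * firmware send queue, returning a busy status when the command should be
 * retried by the midlayer.
 */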
929 int
930 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
931 {
932 	struct fc_lport *lport = shost_priv(host);
933 	struct qedf_ctx *qedf = lport_priv(lport);
934 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
935 	struct fc_rport_libfc_priv *rp = rport->dd_data;
936 	struct qedf_rport *fcport;
937 	struct qedf_ioreq *io_req;
938 	int rc = 0;
939 	int rval;
940 	unsigned long flags = 0;
941 	int num_sgs = 0;
942 
943 	num_sgs = scsi_sg_count(sc_cmd);
944 	if (num_sgs > QEDF_MAX_BDS_PER_CMD) {
945 		QEDF_ERR(&qedf->dbg_ctx,
946 			 "Number of SG elements %d exceeds the hardware limit of %d.\n",
947 			 num_sgs, QEDF_MAX_BDS_PER_CMD);
948 		sc_cmd->result = DID_ERROR << 16;
949 		scsi_done(sc_cmd);
950 		return 0;
951 	}
952 
953 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
954 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
955 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
956 			  "Returning DNC as unloading or stop io, flags 0x%lx.\n",
957 			  qedf->flags);
958 		sc_cmd->result = DID_NO_CONNECT << 16;
959 		scsi_done(sc_cmd);
960 		return 0;
961 	}
962 
963 	if (!qedf->pdev->msix_enabled) {
964 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
965 		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
966 		    sc_cmd);
967 		sc_cmd->result = DID_NO_CONNECT << 16;
968 		scsi_done(sc_cmd);
969 		return 0;
970 	}
971 
972 	rval = fc_remote_port_chkready(rport);
973 	if (rval) {
974 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
975 			  "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
976 			  rval, rport->port_id);
977 		sc_cmd->result = rval;
978 		scsi_done(sc_cmd);
979 		return 0;
980 	}
981 
982 	/* Retry command if we are doing a qed drain operation */
983 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
984 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
985 		rc = SCSI_MLQUEUE_HOST_BUSY;
986 		goto exit_qcmd;
987 	}
988 
989 	if (lport->state != LPORT_ST_READY ||
990 	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
991 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
992 		rc = SCSI_MLQUEUE_HOST_BUSY;
993 		goto exit_qcmd;
994 	}
995 
996 	/* rport and fcport are allocated together, so fcport should be non-NULL */
997 	fcport = (struct qedf_rport *)&rp[1];
998 
999 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1000 	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1001 		/*
1002 		 * Session is not offloaded yet. Let SCSI-ml retry
1003 		 * the command.
1004 		 */
1005 		rc = SCSI_MLQUEUE_TARGET_BUSY;
1006 		goto exit_qcmd;
1007 	}
1008 
1009 	atomic_inc(&fcport->ios_to_queue);
1010 
1011 	if (fcport->retry_delay_timestamp) {
1012 		/* Take fcport->rport_lock for resetting the delay_timestamp */
1013 		spin_lock_irqsave(&fcport->rport_lock, flags);
1014 		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1015 			fcport->retry_delay_timestamp = 0;
1016 		} else {
1017 			spin_unlock_irqrestore(&fcport->rport_lock, flags);
1018 			/* If retry_delay timer is active, flow off the ML */
1019 			rc = SCSI_MLQUEUE_TARGET_BUSY;
1020 			atomic_dec(&fcport->ios_to_queue);
1021 			goto exit_qcmd;
1022 		}
1023 		spin_unlock_irqrestore(&fcport->rport_lock, flags);
1024 	}
1025 
1026 	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1027 	if (!io_req) {
1028 		rc = SCSI_MLQUEUE_HOST_BUSY;
1029 		atomic_dec(&fcport->ios_to_queue);
1030 		goto exit_qcmd;
1031 	}
1032 
1033 	io_req->sc_cmd = sc_cmd;
1034 
1035 	/* Take fcport->rport_lock for posting to fcport send queue */
1036 	spin_lock_irqsave(&fcport->rport_lock, flags);
1037 	if (qedf_post_io_req(fcport, io_req)) {
1038 		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1039 		/* Return SQE to pool */
1040 		atomic_inc(&fcport->free_sqes);
1041 		rc = SCSI_MLQUEUE_HOST_BUSY;
1042 	}
1043 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1044 	atomic_dec(&fcport->ios_to_queue);
1045 
1046 exit_qcmd:
1047 	return rc;
1048 }
1049 
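/*
 * Copy status, residual, response code and sense data from the FCP_RSP in
 * the CQE into the io_req and the associated scsi_cmnd.
 */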
1050 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1051 				 struct fcoe_cqe_rsp_info *fcp_rsp)
1052 {
1053 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1054 	struct qedf_ctx *qedf = io_req->fcport->qedf;
1055 	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1056 	int fcp_sns_len = 0;
1057 	int fcp_rsp_len = 0;
1058 	uint8_t *rsp_info, *sense_data;
1059 
1060 	io_req->fcp_status = FC_GOOD;
1061 	io_req->fcp_resid = 0;
1062 	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1063 	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1064 		io_req->fcp_resid = fcp_rsp->fcp_resid;
1065 
1066 	io_req->scsi_comp_flags = rsp_flags;
1067 	io_req->cdb_status = fcp_rsp->scsi_status_code;
1068 
1069 	if (rsp_flags &
1070 	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1071 		fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1072 
1073 	if (rsp_flags &
1074 	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1075 		fcp_sns_len = fcp_rsp->fcp_sns_len;
1076 
1077 	io_req->fcp_rsp_len = fcp_rsp_len;
1078 	io_req->fcp_sns_len = fcp_sns_len;
1079 	rsp_info = sense_data = io_req->sense_buffer;
1080 
1081 	/* fetch fcp_rsp_code */
1082 	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1083 		/* Only for task management function */
1084 		io_req->fcp_rsp_code = rsp_info[3];
1085 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1086 		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1087 		/* Adjust sense-data location. */
1088 		sense_data += fcp_rsp_len;
1089 	}
1090 
1091 	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1092 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1093 		    "Truncating sense buffer\n");
1094 		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1095 	}
1096 
1097 	/* The sense buffer can be NULL for TMF commands */
1098 	if (sc_cmd->sense_buffer) {
1099 		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1100 		if (fcp_sns_len)
1101 			memcpy(sc_cmd->sense_buffer, sense_data,
1102 			    fcp_sns_len);
1103 	}
1104 }
1105 
1106 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1107 {
1108 	struct scsi_cmnd *sc = io_req->sc_cmd;
1109 
1110 	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1111 		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1112 		    scsi_sg_count(sc), sc->sc_data_direction);
1113 		io_req->bd_tbl->bd_valid = 0;
1114 	}
1115 }
1116 
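/*
 * Completion handler for SCSI command CQEs.  Parses the FCP response,
 * handles firmware-detected underruns and SAM busy/task-set-full retry
 * delay qualifiers, then completes the scsi_cmnd back to the midlayer.
 */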
1117 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1118 	struct qedf_ioreq *io_req)
1119 {
1120 	struct scsi_cmnd *sc_cmd;
1121 	struct fcoe_cqe_rsp_info *fcp_rsp;
1122 	struct qedf_rport *fcport;
1123 	int refcount;
1124 	u16 scope, qualifier = 0;
1125 	u8 fw_residual_flag = 0;
1126 	unsigned long flags = 0;
1127 	u16 chk_scope = 0;
1128 
1129 	if (!io_req)
1130 		return;
1131 	if (!cqe)
1132 		return;
1133 
1134 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1135 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1136 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1137 		QEDF_ERR(&qedf->dbg_ctx,
1138 			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1139 			 io_req->xid);
1140 		return;
1141 	}
1142 
1143 	sc_cmd = io_req->sc_cmd;
1144 	fcp_rsp = &cqe->cqe_info.rsp_info;
1145 
1146 	if (!sc_cmd) {
1147 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1148 		return;
1149 	}
1150 
1151 	if (!qedf_priv(sc_cmd)->io_req) {
1152 		QEDF_WARN(&(qedf->dbg_ctx),
1153 			  "io_req is NULL, returned in another context.\n");
1154 		return;
1155 	}
1156 
1157 	if (!sc_cmd->device) {
1158 		QEDF_ERR(&qedf->dbg_ctx,
1159 			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
1160 		return;
1161 	}
1162 
1163 	if (!scsi_cmd_to_rq(sc_cmd)->q) {
1164 		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1165 		   "is not valid, sc_cmd=%p.\n", sc_cmd);
1166 		return;
1167 	}
1168 
1169 	fcport = io_req->fcport;
1170 
1171 	/*
1172 	 * When flush is active, let the cmds be completed from the cleanup
1173 	 * context
1174 	 */
1175 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1176 	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1177 	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1178 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1179 			  "Dropping good completion xid=0x%x as fcport is flushing",
1180 			  io_req->xid);
1181 		return;
1182 	}
1183 
1184 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
1185 
1186 	qedf_unmap_sg_list(qedf, io_req);
1187 
1188 	/* Check for FCP transport error */
1189 	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1190 		QEDF_ERR(&(qedf->dbg_ctx),
1191 		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1192 		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1193 		    io_req->fcp_rsp_code);
1194 		sc_cmd->result = DID_BUS_BUSY << 16;
1195 		goto out;
1196 	}
1197 
1198 	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1199 	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1200 	if (fw_residual_flag) {
1201 		QEDF_ERR(&qedf->dbg_ctx,
1202 			 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1203 			 io_req->xid, fcp_rsp->rsp_flags.flags,
1204 			 io_req->fcp_resid,
1205 			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1206 			 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1207 
1208 		if (io_req->cdb_status == 0)
1209 			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1210 		else
1211 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1212 
1213 		/*
1214 		 * Set resid to the whole buffer length so we won't try to reuse
1215 		 * any previously read data.
1216 		 */
1217 		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1218 		goto out;
1219 	}
1220 
1221 	switch (io_req->fcp_status) {
1222 	case FC_GOOD:
1223 		if (io_req->cdb_status == 0) {
1224 			/* Good I/O completion */
1225 			sc_cmd->result = DID_OK << 16;
1226 		} else {
1227 			refcount = kref_read(&io_req->refcount);
1228 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1229 			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1230 			    "lba=%02x%02x%02x%02x cdb_status=%d "
1231 			    "fcp_resid=0x%x refcount=%d.\n",
1232 			    qedf->lport->host->host_no, sc_cmd->device->id,
1233 			    sc_cmd->device->lun, io_req->xid,
1234 			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1235 			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1236 			    io_req->cdb_status, io_req->fcp_resid,
1237 			    refcount);
1238 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1239 
1240 			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1241 			    io_req->cdb_status == SAM_STAT_BUSY) {
1242 				/*
1243 				 * Check whether we need to set retry_delay at
1244 				 * all based on retry_delay module parameter
1245 				 * and the status qualifier.
1246 				 */
1247 
1248 				/* Scope is in the upper 2 bits */
1249 				scope = (fcp_rsp->retry_delay_timer & 0xC000) >> 14;
1250 				/* Lower 14 bits */
1251 				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1252 
1253 				if (qedf_retry_delay)
1254 					chk_scope = 1;
1255 				/* Record stats */
1256 				if (io_req->cdb_status ==
1257 				    SAM_STAT_TASK_SET_FULL)
1258 					qedf->task_set_fulls++;
1259 				else
1260 					qedf->busy++;
1261 			}
1262 		}
1263 		if (io_req->fcp_resid)
1264 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
1265 
1266 		if (chk_scope == 1) {
1267 			if ((scope == 1 || scope == 2) &&
1268 			    (qualifier > 0 && qualifier <= 0x3FEF)) {
1269 				/* Check we don't go over the max */
1270 				if (qualifier > QEDF_RETRY_DELAY_MAX) {
1271 					qualifier = QEDF_RETRY_DELAY_MAX;
1272 					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1273 						  "qualifier = %d\n",
1274 						  (fcp_rsp->retry_delay_timer &
1275 						  0x3FFF));
1276 				}
1277 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1278 					  "Scope = %d and qualifier = %d",
1279 					  scope, qualifier);
1280 				/*  Take fcport->rport_lock to
1281 				 *  update the retry_delay_timestamp
1282 				 */
1283 				spin_lock_irqsave(&fcport->rport_lock, flags);
1284 				fcport->retry_delay_timestamp =
1285 					jiffies + (qualifier * HZ / 10);
1286 				spin_unlock_irqrestore(&fcport->rport_lock,
1287 						       flags);
1288 
1289 			} else {
1290 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1291 					  "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1292 					  scope, qualifier);
1293 			}
1294 		}
1295 		break;
1296 	default:
1297 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1298 			   io_req->fcp_status);
1299 		break;
1300 	}
1301 
1302 out:
1303 	if (qedf_io_tracing)
1304 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1305 
1306 	/*
1307 	 * We wait till the end of the function to clear the
1308 	 * outstanding bit in case we need to send an abort
1309 	 */
1310 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1311 
1312 	io_req->sc_cmd = NULL;
1313 	qedf_priv(sc_cmd)->io_req =  NULL;
1314 	scsi_done(sc_cmd);
1315 	kref_put(&io_req->refcount, qedf_release_cmd);
1316 }
1317 
1318 /* Return a SCSI command in some other context besides a normal completion */
1319 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1320 	int result)
1321 {
1322 	struct scsi_cmnd *sc_cmd;
1323 	int refcount;
1324 
1325 	if (!io_req) {
1326 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1327 		return;
1328 	}
1329 
1330 	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1331 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1332 			  "io_req:%p scsi_done handling already done\n",
1333 			  io_req);
1334 		return;
1335 	}
1336 
1337 	/*
1338 	 * We will be done with this command after this call so clear the
1339 	 * outstanding bit.
1340 	 */
1341 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1342 
1343 	sc_cmd = io_req->sc_cmd;
1344 
1345 	if (!sc_cmd) {
1346 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1347 		return;
1348 	}
1349 
1350 	if (!virt_addr_valid(sc_cmd)) {
1351 		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1352 		goto bad_scsi_ptr;
1353 	}
1354 
1355 	if (!qedf_priv(sc_cmd)->io_req) {
1356 		QEDF_WARN(&(qedf->dbg_ctx),
1357 			  "io_req is NULL, returned in another context.\n");
1358 		return;
1359 	}
1360 
1361 	if (!sc_cmd->device) {
1362 		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1363 			 sc_cmd);
1364 		goto bad_scsi_ptr;
1365 	}
1366 
1367 	if (!virt_addr_valid(sc_cmd->device)) {
1368 		QEDF_ERR(&qedf->dbg_ctx,
1369 			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1370 		goto bad_scsi_ptr;
1371 	}
1372 
1373 	if (!sc_cmd->sense_buffer) {
1374 		QEDF_ERR(&qedf->dbg_ctx,
1375 			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1376 			 sc_cmd);
1377 		goto bad_scsi_ptr;
1378 	}
1379 
1380 	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1381 		QEDF_ERR(&qedf->dbg_ctx,
1382 			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1383 			 sc_cmd);
1384 		goto bad_scsi_ptr;
1385 	}
1386 
1387 	qedf_unmap_sg_list(qedf, io_req);
1388 
1389 	sc_cmd->result = result << 16;
1390 	refcount = kref_read(&io_req->refcount);
1391 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1392 	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1393 	    "allowed=%d retries=%d refcount=%d.\n",
1394 	    qedf->lport->host->host_no, sc_cmd->device->id,
1395 	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1396 	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1397 	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1398 	    refcount);
1399 
1400 	/*
1401 	 * Set resid to the whole buffer length so we won't try to reuse any
1402 	 * previously read data.
1403 	 */
1404 	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1405 
1406 	if (qedf_io_tracing)
1407 		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1408 
1409 	io_req->sc_cmd = NULL;
1410 	qedf_priv(sc_cmd)->io_req = NULL;
1411 	scsi_done(sc_cmd);
1412 	kref_put(&io_req->refcount, qedf_release_cmd);
1413 	return;
1414 
1415 bad_scsi_ptr:
1416 	/*
1417 	 * Clear the io_req->sc_cmd backpointer so we don't try to process
1418 	 * this again
1419 	 */
1420 	io_req->sc_cmd = NULL;
1421 	kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
1422 }
1423 
1424 /*
1425  * Handle warning type CQE completions. This is mainly used for REC timer
1426  * popping.
1427  */
1428 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1429 	struct qedf_ioreq *io_req)
1430 {
1431 	int rval, i;
1432 	struct qedf_rport *fcport = io_req->fcport;
1433 	u64 err_warn_bit_map;
1434 	u8 err_warn = 0xff;
1435 
1436 	if (!cqe) {
1437 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1438 			  "cqe is NULL for io_req %p xid=0x%x\n",
1439 			  io_req, io_req->xid);
1440 		return;
1441 	}
1442 
1443 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1444 		  "xid=0x%x\n", io_req->xid);
1445 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1446 		  "err_warn_bitmap=%08x:%08x\n",
1447 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1448 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1449 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1450 		  "rx_buff_off=%08x, rx_id=%04x\n",
1451 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1452 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1453 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1454 
1455 	/* Combine the hi/lo error bitmap words into a single 64-bit value */
1456 	err_warn_bit_map = (u64)
1457 	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1458 	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1459 	for (i = 0; i < 64; i++) {
1460 		if (err_warn_bit_map & (u64)((u64)1 << i)) {
1461 			err_warn = i;
1462 			break;
1463 		}
1464 	}
1465 
1466 	/* Check if REC TOV expired if this is a tape device */
1467 	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1468 		if (err_warn ==
1469 		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1470 			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1471 			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1472 				io_req->rx_buf_off =
1473 				    cqe->cqe_info.err_info.rx_buf_off;
1474 				io_req->tx_buf_off =
1475 				    cqe->cqe_info.err_info.tx_buf_off;
1476 				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1477 				rval = qedf_send_rec(io_req);
1478 				/*
1479 				 * We only want to abort the io_req if we
1480 				 * can't queue the REC command as we want to
1481 				 * keep the exchange open for recovery.
1482 				 */
1483 				if (rval)
1484 					goto send_abort;
1485 			}
1486 			return;
1487 		}
1488 	}
1489 
1490 send_abort:
1491 	init_completion(&io_req->abts_done);
1492 	rval = qedf_initiate_abts(io_req, true);
1493 	if (rval)
1494 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1495 }
1496 
1497 /* Cleanup a command when we receive an error detection completion */
1498 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1499 	struct qedf_ioreq *io_req)
1500 {
1501 	int rval;
1502 
1503 	if (io_req == NULL) {
1504 		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1505 		return;
1506 	}
1507 
1508 	if (io_req->fcport == NULL) {
1509 		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1510 		return;
1511 	}
1512 
1513 	if (!cqe) {
1514 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1515 			"cqe is NULL for io_req %p\n", io_req);
1516 		return;
1517 	}
1518 
1519 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1520 		  "xid=0x%x\n", io_req->xid);
1521 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1522 		  "err_warn_bitmap=%08x:%08x\n",
1523 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1524 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1525 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1526 		  "rx_buff_off=%08x, rx_id=%04x\n",
1527 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1528 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1529 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1530 
1531 	/* When flush is active, let the cmds be flushed out from the cleanup context */
1532 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1533 		(test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1534 		 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1535 		QEDF_ERR(&qedf->dbg_ctx,
1536 			"Dropping EQE for xid=0x%x as fcport is flushing",
1537 			io_req->xid);
1538 		return;
1539 	}
1540 
1541 	if (qedf->stop_io_on_error) {
1542 		qedf_stop_all_io(qedf);
1543 		return;
1544 	}
1545 
1546 	init_completion(&io_req->abts_done);
1547 	rval = qedf_initiate_abts(io_req, true);
1548 	if (rval)
1549 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1550 }
1551 
1552 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1553 	struct qedf_ioreq *els_req)
1554 {
1555 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1556 	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1557 	    kref_read(&els_req->refcount));
1558 
1559 	/*
1560 	 * Need to distinguish this from a timeout when calling the
1561 	 * els_req->cb_func.
1562 	 */
1563 	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1564 
1565 	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
1566 
1567 	/* Cancel the timer */
1568 	cancel_delayed_work_sync(&els_req->timeout_work);
1569 
1570 	/* Call callback function to complete command */
1571 	if (els_req->cb_func && els_req->cb_arg) {
1572 		els_req->cb_func(els_req->cb_arg);
1573 		els_req->cb_arg = NULL;
1574 	}
1575 
1576 	/* Release kref for original initiate_els */
1577 	kref_put(&els_req->refcount, qedf_release_cmd);
1578 }
1579 
1580 /* A value of -1 for lun is a wild card that means flush all
1581  * active SCSI I/Os for the target.
1582  */
1583 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1584 {
1585 	struct qedf_ioreq *io_req;
1586 	struct qedf_ctx *qedf;
1587 	struct qedf_cmd_mgr *cmd_mgr;
1588 	int i, rc;
1589 	unsigned long flags;
1590 	int flush_cnt = 0;
1591 	int wait_cnt = 100;
1592 	int refcount = 0;
1593 
1594 	if (!fcport) {
1595 		QEDF_ERR(NULL, "fcport is NULL\n");
1596 		return;
1597 	}
1598 
1599 	/* Check that fcport is still offloaded */
1600 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1601 		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1602 		return;
1603 	}
1604 
1605 	qedf = fcport->qedf;
1606 
1607 	if (!qedf) {
1608 		QEDF_ERR(NULL, "qedf is NULL.\n");
1609 		return;
1610 	}
1611 
1612 	/* Only wait for all commands to be queued in the Upload context */
1613 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1614 	    (lun == -1)) {
1615 		while (atomic_read(&fcport->ios_to_queue)) {
1616 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1617 				  "Waiting for %d I/Os to be queued\n",
1618 				  atomic_read(&fcport->ios_to_queue));
1619 			if (wait_cnt == 0) {
1620 				QEDF_ERR(NULL,
1621 					 "%d IOs request could not be queued\n",
1622 					 atomic_read(&fcport->ios_to_queue));
1623 			}
1624 			msleep(20);
1625 			wait_cnt--;
1626 		}
1627 	}
1628 
1629 	cmd_mgr = qedf->cmd_mgr;
1630 
1631 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1632 		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1633 		  atomic_read(&fcport->num_active_ios), fcport,
1634 		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1635 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1636 
1637 	mutex_lock(&qedf->flush_mutex);
1638 	if (lun == -1) {
1639 		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1640 	} else {
1641 		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1642 		fcport->lun_reset_lun = lun;
1643 	}
1644 
1645 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1646 		io_req = &cmd_mgr->cmds[i];
1647 
1648 		if (!io_req)
1649 			continue;
1650 		if (!io_req->fcport)
1651 			continue;
1652 
1653 		spin_lock_irqsave(&cmd_mgr->lock, flags);
1654 
1655 		if (io_req->alloc) {
1656 			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1657 				if (io_req->cmd_type == QEDF_SCSI_CMD)
1658 					QEDF_ERR(&qedf->dbg_ctx,
1659 						 "Allocated but not queued, xid=0x%x\n",
1660 						 io_req->xid);
1661 			}
1662 			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1663 		} else {
1664 			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1665 			continue;
1666 		}
1667 
1668 		if (io_req->fcport != fcport)
1669 			continue;
1670 
1671 		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1672 		 * but RRQ is still pending.
1673 		 * Workaround: Within qedf_send_rrq, we check if the fcport is
1674 		 * NULL, and we drop the ref on the io_req to clean it up.
1675 		 */
1676 		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1677 			refcount = kref_read(&io_req->refcount);
1678 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1679 				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1680 				  io_req->xid, io_req->cmd_type, refcount);
1681 			/* If RRQ work has been queued, try to cancel it and
1682 			 * free the io_req
1683 			 */
1684 			if (atomic_read(&io_req->state) ==
1685 			    QEDFC_CMD_ST_RRQ_WAIT) {
1686 				if (cancel_delayed_work_sync
1687 				    (&io_req->rrq_work)) {
1688 					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1689 						  "Putting reference for pending RRQ work xid=0x%x.\n",
1690 						  io_req->xid);
1691 					/* ID: 003 */
1692 					kref_put(&io_req->refcount,
1693 						 qedf_release_cmd);
1694 				}
1695 			}
1696 			continue;
1697 		}
1698 
1699 		/* Only consider flushing ELS during target reset */
1700 		if (io_req->cmd_type == QEDF_ELS &&
1701 		    lun == -1) {
1702 			rc = kref_get_unless_zero(&io_req->refcount);
1703 			if (!rc) {
1704 				QEDF_ERR(&(qedf->dbg_ctx),
1705 				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1706 				    io_req, io_req->xid);
1707 				continue;
1708 			}
1709 			qedf_initiate_cleanup(io_req, false);
1710 			flush_cnt++;
1711 			qedf_flush_els_req(qedf, io_req);
1712 
1713 			/*
1714 			 * Release the kref and go back to the top of the
1715 			 * loop.
1716 			 */
1717 			goto free_cmd;
1718 		}
1719 
1720 		if (io_req->cmd_type == QEDF_ABTS) {
1721 			/* ID: 004 */
1722 			rc = kref_get_unless_zero(&io_req->refcount);
1723 			if (!rc) {
1724 				QEDF_ERR(&(qedf->dbg_ctx),
1725 				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1726 				    io_req, io_req->xid);
1727 				continue;
1728 			}
1729 			if (lun != -1 && io_req->lun != lun)
1730 				goto free_cmd;
1731 
1732 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1733 			    "Flushing abort xid=0x%x.\n", io_req->xid);
1734 
1735 			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1736 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1737 					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
1738 					  io_req->xid);
1739 				kref_put(&io_req->refcount, qedf_release_cmd);
1740 			}
1741 
1742 			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1743 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1744 					  "Putting ref for cancelled tmo work xid=0x%x.\n",
1745 					  io_req->xid);
1746 				qedf_initiate_cleanup(io_req, true);
1747 				/* Notify eh_abort handler that ABTS is
1748 				 * complete
1749 				 */
1750 				complete(&io_req->abts_done);
1751 				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1752 				/* ID: 002 */
1753 				kref_put(&io_req->refcount, qedf_release_cmd);
1754 			}
1755 			flush_cnt++;
1756 			goto free_cmd;
1757 		}
1758 
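		/*
		 * Remaining commands are flushed via the generic cleanup path
		 * below; skip any entry that has no scsi_cmnd attached.
		 */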
1759 		if (!io_req->sc_cmd)
1760 			continue;
1761 		if (!io_req->sc_cmd->device) {
1762 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1763 				  "Device backpointer NULL for sc_cmd=%p.\n",
1764 				  io_req->sc_cmd);
1765 			/* Put reference for non-existent scsi_cmnd */
1766 			io_req->sc_cmd = NULL;
1767 			qedf_initiate_cleanup(io_req, false);
1768 			kref_put(&io_req->refcount, qedf_release_cmd);
1769 			continue;
1770 		}
1771 		if (lun > -1) {
1772 			if (io_req->lun != lun)
1773 				continue;
1774 		}
1775 
1776 		/*
1777 		 * Use kref_get_unless_zero in the unlikely case the command
1778 		 * we're about to flush was completed in the normal SCSI path
1779 		 */
1780 		rc = kref_get_unless_zero(&io_req->refcount);
1781 		if (!rc) {
1782 			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1783 			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1784 			continue;
1785 		}
1786 
1787 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1788 		    "Cleanup xid=0x%x.\n", io_req->xid);
1789 		flush_cnt++;
1790 
1791 		/* Clean up the task and return the I/O to the mid-layer */
1792 		qedf_initiate_cleanup(io_req, true);
1793 
1794 free_cmd:
1795 		kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 004 */
1796 	}
1797 
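	/* Allow up to 60 polls of 500 ms (~30 seconds) for outstanding I/O
	 * on this fcport to drain in the wait loop below.
	 */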
1798 	wait_cnt = 60;
1799 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1800 		  "Flushed 0x%x I/Os, active=0x%x.\n",
1801 		  flush_cnt, atomic_read(&fcport->num_active_ios));
1802 	/* Only wait for all commands to complete in the Upload context */
1803 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1804 	    (lun == -1)) {
1805 		while (atomic_read(&fcport->num_active_ios)) {
1806 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1807 				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1808 				  flush_cnt,
1809 				  atomic_read(&fcport->num_active_ios),
1810 				  wait_cnt);
1811 			if (wait_cnt == 0) {
1812 				QEDF_ERR(&qedf->dbg_ctx,
1813 					 "Flushed %d I/Os, active=%d.\n",
1814 					 flush_cnt,
1815 					 atomic_read(&fcport->num_active_ios));
1816 				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1817 					io_req = &cmd_mgr->cmds[i];
1818 					if (io_req->fcport &&
1819 					    io_req->fcport == fcport) {
1820 						refcount =
1821 						kref_read(&io_req->refcount);
1822 						set_bit(QEDF_CMD_DIRTY,
1823 							&io_req->flags);
1824 						QEDF_ERR(&qedf->dbg_ctx,
1825 							 "Outstanding io_req=%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1826 							 io_req, io_req->xid,
1827 							 io_req->flags,
1828 							 io_req->sc_cmd,
1829 							 refcount,
1830 							 io_req->cmd_type);
1831 					}
1832 				}
1833 				WARN_ON(1);
1834 				break;
1835 			}
1836 			msleep(500);
1837 			wait_cnt--;
1838 		}
1839 	}
1840 
1841 	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1842 	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1843 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1844 	mutex_unlock(&qedf->flush_mutex);
1845 }
1846 
1847 /*
1848  * Initiate an ABTS middle path command. Note that we don't have to initialize
1849  * the task context for an ABTS task.
1850  */
1851 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1852 {
1853 	struct fc_lport *lport;
1854 	struct qedf_rport *fcport = io_req->fcport;
1855 	struct fc_rport_priv *rdata;
1856 	struct qedf_ctx *qedf;
1857 	u16 xid;
1858 	int rc = 0;
1859 	unsigned long flags;
1860 	struct fcoe_wqe *sqe;
1861 	u16 sqe_idx;
1862 	int refcount = 0;
1863 
1864 	/* Sanity check qedf_rport before dereferencing any pointers */
1865 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1866 		QEDF_ERR(NULL, "tgt not offloaded\n");
1867 		rc = 1;
1868 		goto out;
1869 	}
1870 
1871 	qedf = fcport->qedf;
1872 	rdata = fcport->rdata;
1873 
1874 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1875 		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1876 		rc = 1;
1877 		goto out;
1878 	}
1879 
1880 	lport = qedf->lport;
1881 
1882 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1883 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1884 		rc = 1;
1885 		goto drop_rdata_kref;
1886 	}
1887 
1888 	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1889 		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1890 		rc = 1;
1891 		goto drop_rdata_kref;
1892 	}
1893 
1894 	/* Ensure room on SQ */
1895 	if (!atomic_read(&fcport->free_sqes)) {
1896 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1897 		rc = 1;
1898 		goto drop_rdata_kref;
1899 	}
1900 
1901 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1902 		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1903 		rc = 1;
1904 		goto drop_rdata_kref;
1905 	}
1906 
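	/*
	 * Under rport_lock, verify the command is still outstanding and not
	 * already being aborted or cleaned up before marking it as an ABTS.
	 */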
1907 	spin_lock_irqsave(&fcport->rport_lock, flags);
1908 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1909 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1910 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1911 		QEDF_ERR(&qedf->dbg_ctx,
1912 			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1913 			 io_req->xid, io_req->sc_cmd);
1914 		rc = 1;
1915 		spin_unlock_irqrestore(&fcport->rport_lock, flags);
1916 		goto drop_rdata_kref;
1917 	}
1918 
1919 	/* Set the command type to abort */
1920 	io_req->cmd_type = QEDF_ABTS;
1921 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1922 
1923 	kref_get(&io_req->refcount);
1924 
1925 	xid = io_req->xid;
1926 	qedf->control_requests++;
1927 	qedf->packet_aborts++;
1928 
1929 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1930 
1931 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1932 	refcount = kref_read(&io_req->refcount);
1933 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1934 		  "ABTS io_req xid = 0x%x refcount=%d\n",
1935 		  xid, refcount);
1936 
1937 	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1938 
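	/*
	 * Post the ABTS WQE on the connection's send queue under rport_lock
	 * and ring the doorbell so the firmware picks it up.
	 */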
1939 	spin_lock_irqsave(&fcport->rport_lock, flags);
1940 
1941 	sqe_idx = qedf_get_sqe_idx(fcport);
1942 	sqe = &fcport->sq[sqe_idx];
1943 	memset(sqe, 0, sizeof(struct fcoe_wqe));
1944 	io_req->task_params->sqe = sqe;
1945 
1946 	init_initiator_abort_fcoe_task(io_req->task_params);
1947 	qedf_ring_doorbell(fcport);
1948 
1949 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1950 
1951 drop_rdata_kref:
1952 	kref_put(&rdata->kref, fc_rport_destroy);
1953 out:
1954 	return rc;
1955 }
1956 
1957 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1958 	struct qedf_ioreq *io_req)
1959 {
1960 	uint32_t r_ctl;
1961 	int rc;
1962 	struct qedf_rport *fcport = io_req->fcport;
1963 
1964 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1965 		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1966 
1967 	r_ctl = cqe->cqe_info.abts_info.r_ctl;
1968 
1969 	/* This was added at a point when we were scheduling abts_compl &
1970 	 * cleanup_compl on different CPUs and there was a possibility of
1971 	 * the io_req to be freed from the other context before we got here.
1972 	 */
1973 	if (!fcport) {
1974 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1975 			  "Dropping ABTS completion xid=0x%x as fcport is NULL.\n",
1976 			  io_req->xid);
1977 		return;
1978 	}
1979 
1980 	/*
1981 	 * When flush is active, let the cmds be completed from the cleanup
1982 	 * context
1983 	 */
1984 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1985 	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1986 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1987 			  "Dropping ABTS completion xid=0x%x as fcport is flushing.\n",
1988 			  io_req->xid);
1989 		return;
1990 	}
1991 
1992 	if (!cancel_delayed_work(&io_req->timeout_work)) {
1993 		QEDF_ERR(&qedf->dbg_ctx,
1994 			 "Wasn't able to cancel abts timeout work.\n");
1995 	}
1996 
1997 	switch (r_ctl) {
1998 	case FC_RCTL_BA_ACC:
1999 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2000 		    "ABTS response - ACC, send RRQ after R_A_TOV\n");
2001 		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2002 		rc = kref_get_unless_zero(&io_req->refcount);	/* ID: 003 */
2003 		if (!rc) {
2004 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2005 				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
2006 				  io_req->xid);
2007 			return;
2008 		}
2009 		/*
2010 		 * Don't release this cmd yet. It will be released
2011 		 * after we get the RRQ response.
2012 		 */
2013 		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2014 		    msecs_to_jiffies(qedf->lport->r_a_tov));
2015 		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2016 		break;
2017 	/* For error cases let the cleanup return the command */
2018 	case FC_RCTL_BA_RJT:
2019 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2020 		   "ABTS response - RJT\n");
2021 		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2022 		break;
2023 	default:
2024 		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2025 		break;
2026 	}
2027 
2028 	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2029 
2030 	if (io_req->sc_cmd) {
2031 		if (!io_req->return_scsi_cmd_on_abts)
2032 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2033 				  "Not calling scsi_done for xid=0x%x.\n",
2034 				  io_req->xid);
2035 		if (io_req->return_scsi_cmd_on_abts)
2036 			qedf_scsi_done(qedf, io_req, DID_ERROR);
2037 	}
2038 
2039 	/* Notify eh_abort handler that ABTS is complete */
2040 	complete(&io_req->abts_done);
2041 
2042 	kref_put(&io_req->refcount, qedf_release_cmd);
2043 }
2044 
2045 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2046 {
2047 	struct qedf_mp_req *mp_req;
2048 	struct scsi_sge *mp_req_bd;
2049 	struct scsi_sge *mp_resp_bd;
2050 	struct qedf_ctx *qedf = io_req->fcport->qedf;
2051 	dma_addr_t addr;
2052 	uint64_t sz;
2053 
2054 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2055 
2056 	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2057 	memset(mp_req, 0, sizeof(struct qedf_mp_req));
2058 
2059 	if (io_req->cmd_type != QEDF_ELS) {
2060 		mp_req->req_len = sizeof(struct fcp_cmnd);
2061 		io_req->data_xfer_len = mp_req->req_len;
2062 	} else
2063 		mp_req->req_len = io_req->data_xfer_len;
2064 
2065 	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2066 	    &mp_req->req_buf_dma, GFP_KERNEL);
2067 	if (!mp_req->req_buf) {
2068 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2069 		qedf_free_mp_resc(io_req);
2070 		return -ENOMEM;
2071 	}
2072 
2073 	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2074 	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2075 	if (!mp_req->resp_buf) {
2076 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2077 			  "buffer\n");
2078 		qedf_free_mp_resc(io_req);
2079 		return -ENOMEM;
2080 	}
2081 
2082 	/* Allocate and map mp_req_bd and mp_resp_bd */
2083 	sz = sizeof(struct scsi_sge);
2084 	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2085 	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
2086 	if (!mp_req->mp_req_bd) {
2087 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2088 		qedf_free_mp_resc(io_req);
2089 		return -ENOMEM;
2090 	}
2091 
2092 	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2093 	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2094 	if (!mp_req->mp_resp_bd) {
2095 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2096 		qedf_free_mp_resc(io_req);
2097 		return -ENOMEM;
2098 	}
2099 
2100 	/* Fill bd table */
2101 	addr = mp_req->req_buf_dma;
2102 	mp_req_bd = mp_req->mp_req_bd;
2103 	mp_req_bd->sge_addr.lo = U64_LO(addr);
2104 	mp_req_bd->sge_addr.hi = U64_HI(addr);
2105 	mp_req_bd->sge_len = QEDF_PAGE_SIZE;
2106 
2107 	/*
2108 	 * The MP buffer holds either a task mgmt command or an ELS
2109 	 * payload, so it is assumed to consume a single bd entry
2110 	 * in the bd table.
2111 	 */
2112 	mp_resp_bd = mp_req->mp_resp_bd;
2113 	addr = mp_req->resp_buf_dma;
2114 	mp_resp_bd->sge_addr.lo = U64_LO(addr);
2115 	mp_resp_bd->sge_addr.hi = U64_HI(addr);
2116 	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
2117 
2118 	return 0;
2119 }
2120 
2121 /*
2122  * Last ditch effort to clear the port if it's stuck. Used only after a
2123  * cleanup task times out.
2124  */
2125 static void qedf_drain_request(struct qedf_ctx *qedf)
2126 {
2127 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2128 		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2129 		return;
2130 	}
2131 
2132 	/* Set bit to return all queuecommand requests as busy */
2133 	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2134 
2135 	/* Call qed drain request for function. Should be synchronous */
2136 	qed_ops->common->drain(qedf->cdev);
2137 
2138 	/* Settle time for CQEs to be returned */
2139 	msleep(100);
2140 
2141 	/* Unplug and continue */
2142 	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2143 }
2144 
2145 /*
2146  * Returns SUCCESS if the cleanup task does not time out, otherwise returns
2147  * FAILED.
2148  */
2149 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2150 	bool return_scsi_cmd_on_abts)
2151 {
2152 	struct qedf_rport *fcport;
2153 	struct qedf_ctx *qedf;
2154 	int tmo = 0;
2155 	int rc = SUCCESS;
2156 	unsigned long flags;
2157 	struct fcoe_wqe *sqe;
2158 	u16 sqe_idx;
2159 	int refcount = 0;
2160 
2161 	fcport = io_req->fcport;
2162 	if (!fcport) {
2163 		QEDF_ERR(NULL, "fcport is NULL.\n");
2164 		return SUCCESS;
2165 	}
2166 
2167 	/* Sanity check qedf_rport before dereferencing any pointers */
2168 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2169 		QEDF_ERR(NULL, "tgt not offloaded\n");
2170 		return SUCCESS;
2171 	}
2172 
2173 	qedf = fcport->qedf;
2174 	if (!qedf) {
2175 		QEDF_ERR(NULL, "qedf is NULL.\n");
2176 		return SUCCESS;
2177 	}
2178 
2179 	if (io_req->cmd_type == QEDF_ELS) {
2180 		goto process_els;
2181 	}
2182 
2183 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2184 	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2185 		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2186 			  "cleanup processing or already completed.\n",
2187 			  io_req->xid);
2188 		return SUCCESS;
2189 	}
2190 	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2191 
2192 process_els:
2193 	/* Ensure room on SQ */
2194 	if (!atomic_read(&fcport->free_sqes)) {
2195 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2196 		/* Need to make sure we clear the flag since it was set */
2197 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2198 		return FAILED;
2199 	}
2200 
2201 	if (io_req->cmd_type == QEDF_CLEANUP) {
2202 		QEDF_ERR(&qedf->dbg_ctx,
2203 			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2204 			 io_req->xid, io_req->cmd_type);
2205 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2206 		return SUCCESS;
2207 	}
2208 
2209 	refcount = kref_read(&io_req->refcount);
2210 
2211 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2212 		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2213 		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2214 		  refcount, fcport, fcport->rdata->ids.port_id);
2215 
2216 	/* Cleanup cmds re-use the same TID as the original I/O */
2217 	spin_lock_irqsave(&fcport->rport_lock, flags);
2218 	io_req->cmd_type = QEDF_CLEANUP;
2219 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2220 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2221 
2222 	init_completion(&io_req->cleanup_done);
2223 
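	/* Post the cleanup WQE on the connection's send queue under
	 * rport_lock and ring the doorbell.
	 */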
2224 	spin_lock_irqsave(&fcport->rport_lock, flags);
2225 
2226 	sqe_idx = qedf_get_sqe_idx(fcport);
2227 	sqe = &fcport->sq[sqe_idx];
2228 	memset(sqe, 0, sizeof(struct fcoe_wqe));
2229 	io_req->task_params->sqe = sqe;
2230 
2231 	init_initiator_cleanup_fcoe_task(io_req->task_params);
2232 	qedf_ring_doorbell(fcport);
2233 
2234 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2235 
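	/* Wait up to QEDF_CLEANUP_TIMEOUT seconds for the firmware to post
	 * the cleanup completion.
	 */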
2236 	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2237 					  QEDF_CLEANUP_TIMEOUT * HZ);
2238 
2239 	if (!tmo) {
2240 		rc = FAILED;
2241 		/* Timeout case */
2242 		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2243 			  "xid=%x.\n", io_req->xid);
2244 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2245 		/* Issue a drain request if cleanup task times out */
2246 		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2247 		qedf_drain_request(qedf);
2248 	}
2249 
2250 	/* If this is a TASK MGMT command, handle it here; the reference
2251 	 * will be decreased in qedf_execute_tmf.
2252 	 */
2253 	if (io_req->tm_flags  == FCP_TMF_LUN_RESET ||
2254 	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
2255 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2256 		io_req->sc_cmd = NULL;
2257 		kref_put(&io_req->refcount, qedf_release_cmd);
2258 		complete(&io_req->tm_done);
2259 	}
2260 
2261 	if (io_req->sc_cmd) {
2262 		if (!io_req->return_scsi_cmd_on_abts)
2263 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2264 				  "Not calling scsi_done for xid=0x%x.\n",
2265 				  io_req->xid);
2266 		if (io_req->return_scsi_cmd_on_abts)
2267 			qedf_scsi_done(qedf, io_req, DID_ERROR);
2268 	}
2269 
2270 	if (rc == SUCCESS)
2271 		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2272 	else
2273 		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2274 
2275 	return rc;
2276 }
2277 
2278 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2279 	struct qedf_ioreq *io_req)
2280 {
2281 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
2282 		   io_req->xid);
2283 
2284 	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2285 
2286 	/* Complete so we can finish cleaning up the I/O */
2287 	complete(&io_req->cleanup_done);
2288 }
2289 
2290 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2291 	uint8_t tm_flags)
2292 {
2293 	struct qedf_ioreq *io_req;
2294 	struct fcoe_task_context *task;
2295 	struct qedf_ctx *qedf = fcport->qedf;
2296 	struct fc_lport *lport = qedf->lport;
2297 	int rc = 0;
2298 	uint16_t xid;
2299 	int tmo = 0;
2300 	int lun = 0;
2301 	unsigned long flags;
2302 	struct fcoe_wqe *sqe;
2303 	u16 sqe_idx;
2304 
2305 	if (!sc_cmd) {
2306 		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
2307 		return FAILED;
2308 	}
2309 
2310 	lun = (int)sc_cmd->device->lun;
2311 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2312 		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2313 		rc = FAILED;
2314 		goto no_flush;
2315 	}
2316 
2317 	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2318 	if (!io_req) {
2319 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate TMF io_req.\n");
2320 		rc = -EAGAIN;
2321 		goto no_flush;
2322 	}
2323 
2324 	if (tm_flags == FCP_TMF_LUN_RESET)
2325 		qedf->lun_resets++;
2326 	else if (tm_flags == FCP_TMF_TGT_RESET)
2327 		qedf->target_resets++;
2328 
2329 	/* Initialize rest of io_req fields */
2330 	io_req->sc_cmd = sc_cmd;
2331 	io_req->fcport = fcport;
2332 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2333 
2334 	/* Record which cpu this request is associated with */
2335 	io_req->cpu = smp_processor_id();
2336 
2337 	/* Set TM flags */
2338 	io_req->io_req_flags = QEDF_READ;
2339 	io_req->data_xfer_len = 0;
2340 	io_req->tm_flags = tm_flags;
2341 
2342 	/* Don't return the SCSI command via scsi_done if this TMF is aborted */
2343 	io_req->return_scsi_cmd_on_abts = false;
2344 
2345 	/* Obtain exchange id */
2346 	xid = io_req->xid;
2347 
2348 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2349 		   "0x%x\n", xid);
2350 
2351 	/* Initialize task context for this IO request */
2352 	task = qedf_get_task_mem(&qedf->tasks, xid);
2353 
2354 	init_completion(&io_req->tm_done);
2355 
2356 	spin_lock_irqsave(&fcport->rport_lock, flags);
2357 
2358 	sqe_idx = qedf_get_sqe_idx(fcport);
2359 	sqe = &fcport->sq[sqe_idx];
2360 	memset(sqe, 0, sizeof(struct fcoe_wqe));
2361 
2362 	qedf_init_task(fcport, lport, io_req, task, sqe);
2363 	qedf_ring_doorbell(fcport);
2364 
2365 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2366 
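	/* Mark the TMF as outstanding and wait up to QEDF_TM_TIMEOUT seconds
	 * for the response.
	 */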
2367 	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2368 	tmo = wait_for_completion_timeout(&io_req->tm_done,
2369 	    QEDF_TM_TIMEOUT * HZ);
2370 
2371 	if (!tmo) {
2372 		rc = FAILED;
2373 		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2374 		/* Clear outstanding bit since command timed out */
2375 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2376 		io_req->sc_cmd = NULL;
2377 	} else {
2378 		/* Check TMF response code */
2379 		if (io_req->fcp_rsp_code == 0)
2380 			rc = SUCCESS;
2381 		else
2382 			rc = FAILED;
2383 	}
2384 	/*
2385 	 * Double check that fcport has not gone into an uploading state before
2386 	 * executing the command flush for the LUN/target.
2387 	 */
2388 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2389 		QEDF_ERR(&qedf->dbg_ctx,
2390 			 "fcport is uploading, not executing flush.\n");
2391 		goto no_flush;
2392 	}
2393 	/* We do not need this io_req any more */
2394 	kref_put(&io_req->refcount, qedf_release_cmd);
2395 
2396 
2397 	if (tm_flags == FCP_TMF_LUN_RESET)
2398 		qedf_flush_active_ios(fcport, lun);
2399 	else
2400 		qedf_flush_active_ios(fcport, -1);
2401 
2402 no_flush:
2403 	if (rc != SUCCESS) {
2404 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2405 		rc = FAILED;
2406 	} else {
2407 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2408 		rc = SUCCESS;
2409 	}
2410 	return rc;
2411 }
2412 
2413 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2414 {
2415 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2416 	struct fc_rport_libfc_priv *rp = rport->dd_data;
2417 	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2418 	struct qedf_ctx *qedf;
2419 	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
2420 	int rc = SUCCESS;
2421 	int rval;
2422 	struct qedf_ioreq *io_req = NULL;
2423 	int ref_cnt = 0;
2424 	struct fc_rport_priv *rdata = fcport->rdata;
2425 
2426 	QEDF_ERR(NULL,
2427 		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
2428 		 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
2429 		 rport->scsi_target_id, (int)sc_cmd->device->lun);
2430 
2431 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2432 		QEDF_ERR(NULL, "stale rport\n");
2433 		return FAILED;
2434 	}
2435 
2436 	QEDF_ERR(NULL, "portid=%06x tm_flags=%s\n", rdata->ids.port_id,
2437 		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
2438 		 "LUN RESET");
2439 
2440 	if (qedf_priv(sc_cmd)->io_req) {
2441 		io_req = qedf_priv(sc_cmd)->io_req;
2442 		ref_cnt = kref_read(&io_req->refcount);
2443 		QEDF_ERR(NULL,
2444 			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2445 			 io_req, io_req->xid, ref_cnt);
2446 	}
2447 
2448 	rval = fc_remote_port_chkready(rport);
2449 	if (rval) {
2450 		QEDF_ERR(NULL, "device_reset rport not ready\n");
2451 		rc = FAILED;
2452 		goto tmf_err;
2453 	}
2454 
2455 	rc = fc_block_scsi_eh(sc_cmd);
2456 	if (rc)
2457 		goto tmf_err;
2458 
2459 	if (!fcport) {
2460 		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2461 		rc = FAILED;
2462 		goto tmf_err;
2463 	}
2464 
2465 	qedf = fcport->qedf;
2466 
2467 	if (!qedf) {
2468 		QEDF_ERR(NULL, "qedf is NULL.\n");
2469 		rc = FAILED;
2470 		goto tmf_err;
2471 	}
2472 
2473 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2474 		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
2475 		rc = SUCCESS;
2476 		goto tmf_err;
2477 	}
2478 
2479 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2480 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2481 		rc = SUCCESS;
2482 		goto tmf_err;
2483 	}
2484 
2485 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2486 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2487 		rc = FAILED;
2488 		goto tmf_err;
2489 	}
2490 
2491 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2492 		if (!fcport->rdata)
2493 			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2494 				 fcport);
2495 		else
2496 			QEDF_ERR(&qedf->dbg_ctx,
2497 				 "fcport %p port_id=%06x is uploading.\n",
2498 				 fcport, fcport->rdata->ids.port_id);
2499 		rc = FAILED;
2500 		goto tmf_err;
2501 	}
2502 
2503 	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2504 
2505 tmf_err:
2506 	kref_put(&rdata->kref, fc_rport_destroy);
2507 	return rc;
2508 }
2509 
2510 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2511 	struct qedf_ioreq *io_req)
2512 {
2513 	struct fcoe_cqe_rsp_info *fcp_rsp;
2514 
2515 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2516 
2517 	fcp_rsp = &cqe->cqe_info.rsp_info;
2518 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
2519 
2520 	io_req->sc_cmd = NULL;
2521 	complete(&io_req->tm_done);
2522 }
2523 
2524 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2525 	struct fcoe_cqe *cqe)
2526 {
2527 	unsigned long flags;
2528 	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2529 	u32 payload_len, crc;
2530 	struct fc_frame_header *fh;
2531 	struct fc_frame *fp;
2532 	struct qedf_io_work *io_work;
2533 	u32 bdq_idx;
2534 	void *bdq_addr;
2535 	struct scsi_bd *p_bd_info;
2536 
2537 	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2538 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2539 		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2540 		  le32_to_cpu(p_bd_info->address.hi),
2541 		  le32_to_cpu(p_bd_info->address.lo),
2542 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2543 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2544 		  qedf->bdq_prod_idx, pktlen);
2545 
2546 	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2547 	if (bdq_idx >= QEDF_BDQ_SIZE) {
2548 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2549 		    bdq_idx);
2550 		goto increment_prod;
2551 	}
2552 
2553 	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2554 	if (!bdq_addr) {
2555 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2556 		    "unsolicited packet.\n");
2557 		goto increment_prod;
2558 	}
2559 
2560 	if (qedf_dump_frames) {
2561 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2562 		    "BDQ frame is at addr=%p.\n", bdq_addr);
2563 		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2564 		    (void *)bdq_addr, pktlen, false);
2565 	}
2566 
2567 	/* Allocate frame */
2568 	payload_len = pktlen - sizeof(struct fc_frame_header);
2569 	fp = fc_frame_alloc(qedf->lport, payload_len);
2570 	if (!fp) {
2571 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2572 		goto increment_prod;
2573 	}
2574 
2575 	/* Copy data from BDQ buffer into fc_frame struct */
2576 	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2577 	memcpy(fh, (void *)bdq_addr, pktlen);
2578 
2579 	QEDF_WARN(&qedf->dbg_ctx,
2580 		  "Processing unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
2581 		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2582 		  fh->fh_type, fc_frame_payload_op(fp));
2583 
2584 	/* Initialize the frame so libfc sees it as a valid frame */
2585 	crc = fcoe_fc_crc(fp);
2586 	fc_frame_init(fp);
2587 	fr_dev(fp) = qedf->lport;
2588 	fr_sof(fp) = FC_SOF_I3;
2589 	fr_eof(fp) = FC_EOF_T;
2590 	fr_crc(fp) = cpu_to_le32(~crc);
2591 
2592 	/*
2593 	 * We need to return the frame back up to libfc in a non-atomic
2594 	 * context
2595 	 */
2596 	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2597 	if (!io_work) {
2598 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2599 			   "work for I/O completion.\n");
2600 		fc_frame_free(fp);
2601 		goto increment_prod;
2602 	}
2603 	memset(io_work, 0, sizeof(struct qedf_io_work));
2604 
2605 	INIT_WORK(&io_work->work, qedf_fp_io_handler);
2606 
2607 	/* Copy contents of CQE for deferred processing */
2608 	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2609 
2610 	io_work->qedf = qedf;
2611 	io_work->fp = fp;
2612 
2613 	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2614 increment_prod:
2615 	spin_lock_irqsave(&qedf->hba_lock, flags);
2616 
2617 	/* Increment producer to let f/w know we've handled the frame */
2618 	qedf->bdq_prod_idx++;
2619 
2620 	/* Producer index wraps at uint16_t boundary */
2621 	if (qedf->bdq_prod_idx == 0xffff)
2622 		qedf->bdq_prod_idx = 0;
2623 
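	/*
	 * Write the new producer index to both BDQ producer registers and
	 * read each back so the posted MMIO writes are flushed.
	 */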
2624 	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2625 	readw(qedf->bdq_primary_prod);
2626 	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2627 	readw(qedf->bdq_secondary_prod);
2628 
2629 	spin_unlock_irqrestore(&qedf->hba_lock, flags);
2630 }
2631