xref: /linux/drivers/scsi/qla2xxx/qla_nvme.c (revision 9a6b55ac)
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

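/*
 * Register an fcport with the NVMe-FC transport once PRLI has reported
 * NVMe target/discovery support. The local port is registered on first
 * use; failures are not propagated so SCSI-only operation can continue.
 */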
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* The admin queue (qidx 0) shares the first I/O queue's qpair. */
	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx = %d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

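/*
 * Final kref release for an NVMe FCP command srb: detach the srb from
 * its nvme_private under cmd_lock, fill in the transport request status,
 * complete it via fd->done() and return the srb to its qpair pool.
 */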
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
	}
	fd->status = 0;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

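/*
 * Final kref release for an NVMe LS request srb: detach the srb from its
 * nvme_private, complete the request via fd->done() with the saved
 * completion status and free the srb.
 */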
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

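/* Work item that drops the final LS srb reference in process context. */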
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

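/*
 * srb completion callback for LS requests: record the completion status
 * and defer the final kref_put() to a work item.
 */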
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

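/*
 * Deferred abort handler: unless the firmware is already gone or the
 * host is shutting down, ask the firmware to abort the command. Drops
 * the reference taken when the work was scheduled.
 */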
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

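/*
 * nvme-fc ls_abort callback: take a command reference under cmd_lock and
 * hand the actual abort off to qla_nvme_abort_work().
 */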
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

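/*
 * nvme-fc ls_req callback: wrap the transport's LS request in an srb,
 * DMA-map the request buffer and queue it to the firmware.
 */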
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = (void *)priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

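/*
 * nvme-fc fcp_abort callback: same deferral scheme as LS aborts; take a
 * command reference under cmd_lock and schedule qla_nvme_abort_work().
 */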
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

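/*
 * Build and ring a Command Type NVME IOCB on the qpair's request queue:
 * reserve a handle and ring space, set the data-direction control flags,
 * point the IOCB at the command/response IUs, then chain the payload
 * scatterlist through Continuation Type 1 IOCBs as needed.
 */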
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = CF_READ_DATA;
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = CF_WRITE_DATA;
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
				(sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
				    CF_NVME_FIRST_BURST_ENABLE;
		}
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

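/*
 * nvme-fc fcp_io callback: post an NVMe FCP command. Returns -EBUSY to
 * stall the transport queue while the remote port is resetting or no
 * srb is available, so the core does not exhaust its retry budget.
 */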
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;

	if (!qpair || !fcport || !qpair->fw_started || fcport->deleted)
		return rval;

	vha = fcport->vha;
	/*
	 * If we know the dev is going away while the transport is still
	 * sending I/Os, return busy back to stall the I/O queue. This
	 * happens when the link goes away and the firmware hasn't notified
	 * us yet, but I/Os are being returned. If the dev comes back
	 * quickly we won't exhaust the I/O retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = (void *)priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

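/* nvme-fc callback: local port teardown finished; wake up the waiter. */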
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

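/*
 * nvme-fc callback: remote port teardown finished; clear the fcport's
 * NVMe state and wake up the waiter in qla_nvme_unregister_remote_port().
 */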
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

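/*
 * Callbacks and limits advertised to the NVMe-FC transport core.
 * max_hw_queues and dma_boundary are adjusted to the HBA's actual
 * capabilities in qla_nvme_register_hba().
 */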
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.module			= THIS_MODULE,
	.localport_delete	= qla_nvme_localport_delete,
	.remoteport_delete	= qla_nvme_remoteport_delete,
	.create_queue		= qla_nvme_alloc_queue,
	.delete_queue		= NULL,
	.ls_req			= qla_nvme_ls_req,
	.ls_abort		= qla_nvme_ls_abort,
	.fcp_io			= qla_nvme_post_cmd,
	.fcp_abort		= qla_nvme_fcp_abort,
	.max_hw_queues		= 8,
	.max_sgl_segments	= 1024,
	.max_dif_sgl_segments	= 64,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= 8,
	.remote_priv_sz		= sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz		= sizeof(struct nvme_private),
	.fcprqst_priv_sz	= sizeof(struct nvme_private),
};

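/*
 * Unregister an fcport from the NVMe-FC transport and wait for its
 * remoteport_delete callback. If the driver is being removed,
 * dev_loss_tmo is forced to zero first so teardown happens immediately.
 */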
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

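/* Unregister the local port, if any, and wait for localport_delete. */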
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

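/*
 * Register this host as an NVMe-FC local port. The advertised number of
 * hardware queues is capped at ha->max_req_queues - 2, and the DMA
 * boundary is taken from the SCSI host.
 */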
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}