1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38
39 #include "lpfc_version.h"
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52 #include "lpfc_debugfs.h"
53
54 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
55 struct lpfc_async_xchg_ctx *,
56 dma_addr_t rspbuf,
57 uint16_t rspsize);
58 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
59 struct lpfc_async_xchg_ctx *);
60 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
61 struct lpfc_async_xchg_ctx *,
62 uint32_t, uint16_t);
63 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
64 struct lpfc_async_xchg_ctx *,
65 uint32_t, uint16_t);
66 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
67 struct lpfc_async_xchg_ctx *);
68 static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
69
70 static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
71
72 static union lpfc_wqe128 lpfc_tsend_cmd_template;
73 static union lpfc_wqe128 lpfc_treceive_cmd_template;
74 static union lpfc_wqe128 lpfc_trsp_cmd_template;
75
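/*
 * Note: the three templates below (TSEND, TRECEIVE, TRSP) are built once
 * at init time; per-IO code copies a template into the command WQE and
 * fills in only the words marked "variable" in the comments that follow.
 */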
76 /* Setup WQE templates for NVME IOs */
77 void
78 lpfc_nvmet_cmd_template(void)
79 {
80 union lpfc_wqe128 *wqe;
81
82 /* TSEND template */
83 wqe = &lpfc_tsend_cmd_template;
84 memset(wqe, 0, sizeof(union lpfc_wqe128));
85
86 /* Word 0, 1, 2 - BDE is variable */
87
88 /* Word 3 - payload_offset_len is zero */
89
90 /* Word 4 - relative_offset is variable */
91
92 /* Word 5 - is zero */
93
94 /* Word 6 - ctxt_tag, xri_tag is variable */
95
96 /* Word 7 - wqe_ar is variable */
97 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
98 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
99 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
100 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
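/* Default to auto-generating the FCP response when the data send
 * completes (wqe_ar); per-IO setup clears this when a separate TRSP
 * will carry the response.
 */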
101 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
102
103 /* Word 8 - abort_tag is variable */
104
105 /* Word 9 - reqtag, rcvoxid is variable */
106
107 /* Word 10 - wqes, xc is variable */
108 bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
109 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
110 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
111 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
112 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
113 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
114
115 /* Word 11 - sup, irsp, irsplen is variable */
116 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
117 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
118 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
119 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
120 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
121 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
122
123 /* Word 12 - fcp_data_len is variable */
124
125 /* Word 13, 14, 15 - PBDE is zero */
126
127 /* TRECEIVE template */
128 wqe = &lpfc_treceive_cmd_template;
129 memset(wqe, 0, sizeof(union lpfc_wqe128));
130
131 /* Word 0, 1, 2 - BDE is variable */
132
133 /* Word 3 */
134 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
135
136 /* Word 4 - relative_offset is variable */
137
138 /* Word 5 - is zero */
139
140 /* Word 6 - ctxt_tag, xri_tag is variable */
141
142 /* Word 7 */
143 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
144 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
145 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
146 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
147 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
148
149 /* Word 8 - abort_tag is variable */
150
151 /* Word 9 - reqtag, rcvoxid is variable */
152
153 /* Word 10 - xc is variable */
154 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
155 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
156 bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
157 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
158 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
159 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
160
161 /* Word 11 - pbde is variable */
162 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
163 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
164 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
165 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
166 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
167 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
168
169 /* Word 12 - fcp_data_len is variable */
170
171 /* Word 13, 14, 15 - PBDE is variable */
172
173 /* TRSP template */
174 wqe = &lpfc_trsp_cmd_template;
175 memset(wqe, 0, sizeof(union lpfc_wqe128));
176
177 /* Word 0, 1, 2 - BDE is variable */
178
179 /* Word 3 - response_len is variable */
180
181 /* Word 4, 5 - is zero */
182
183 /* Word 6 - ctxt_tag, xri_tag is variable */
184
185 /* Word 7 */
186 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
187 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
188 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
189 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
190 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
191
192 /* Word 8 - abort_tag is variable */
193
194 /* Word 9 - reqtag is variable */
195
196 /* Word 10 wqes, xc is variable */
197 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
198 bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
199 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
200 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
201 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
202 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
203
204 /* Word 11 irsp, irsplen is variable */
205 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
206 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
207 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
208 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
209 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
210 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
211
212 /* Word 12, 13, 14, 15 - is zero */
213 }
214
215 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
216 static struct lpfc_async_xchg_ctx *
217 lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
218 {
219 struct lpfc_async_xchg_ctx *ctxp;
220 unsigned long iflag;
221 bool found = false;
222
223 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
224 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
225 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
226 continue;
227
228 found = true;
229 break;
230 }
231 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
232 if (found)
233 return ctxp;
234
235 return NULL;
236 }
237
238 static struct lpfc_async_xchg_ctx *
239 lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
240 {
241 struct lpfc_async_xchg_ctx *ctxp;
242 unsigned long iflag;
243 bool found = false;
244
245 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
246 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
247 if (ctxp->oxid != oxid || ctxp->sid != sid)
248 continue;
249
250 found = true;
251 break;
252 }
253 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
254 if (found)
255 return ctxp;
256
257 return NULL;
258 }
259 #endif
260
261 static void
262 lpfc_nvmet_defer_release(struct lpfc_hba *phba,
263 struct lpfc_async_xchg_ctx *ctxp)
264 {
265 lockdep_assert_held(&ctxp->ctxlock);
266
267 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
268 "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
269 ctxp->oxid, ctxp->flag);
270
271 if (ctxp->flag & LPFC_NVME_CTX_RLS)
272 return;
273
274 ctxp->flag |= LPFC_NVME_CTX_RLS;
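/* Move the context off the driver's active exchange list and onto the
 * aborted-context list; the buffer is recycled only after exchange
 * cleanup (e.g. the XRI abort) completes.
 */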
275 spin_lock(&phba->sli4_hba.t_active_list_lock);
276 list_del(&ctxp->list);
277 spin_unlock(&phba->sli4_hba.t_active_list_lock);
278 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
280 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
281 }
282
283 /**
284 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
285 * transmission of an NVME LS response.
286 * @phba: Pointer to HBA context object.
287 * @cmdwqe: Pointer to driver command WQE object.
288 * @rspwqe: Pointer to driver response WQE object.
289 *
290 * The function is called from SLI ring event handler with no
291 * lock held. The function frees memory resources used for the command
292 * used to send the NVME LS RSP.
293 **/
294 void
295 __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
296 struct lpfc_iocbq *rspwqe)
297 {
298 struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
299 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
300 struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
301 uint32_t status, result;
302
303 status = bf_get(lpfc_wcqe_c_status, wcqe);
304 result = wcqe->parameter;
305
306 if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
308 "6410 NVMEx LS cmpl state mismatch IO x%x: "
309 "%d %d\n",
310 axchg->oxid, axchg->state, axchg->entry_cnt);
311 }
312
313 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
314 axchg->oxid, status, result);
315
316 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
317 "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
318 status, result, axchg->oxid);
319
320 lpfc_nlp_put(cmdwqe->ndlp);
321 cmdwqe->context_un.axchg = NULL;
322 cmdwqe->bpl_dmabuf = NULL;
323 lpfc_sli_release_iocbq(phba, cmdwqe);
324 ls_rsp->done(ls_rsp);
325 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
326 "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
327 status, axchg->oxid);
328 kfree(axchg);
329 }
330
331 /**
332 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
333 * @phba: Pointer to HBA context object.
334 * @cmdwqe: Pointer to driver command WQE object.
335 * @rspwqe: Pointer to driver response WQE object.
336 *
337 * The function is called from SLI ring event handler with no
338 * lock held. This function is the completion handler for NVME LS commands.
339 * The function updates any states and statistics, then calls the
340 * generic completion handler to free resources.
341 **/
342 static void
343 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
344 struct lpfc_iocbq *rspwqe)
345 {
346 struct lpfc_nvmet_tgtport *tgtp;
347 uint32_t status, result;
348 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
349
350 if (!phba->targetport)
351 goto finish;
352
353 status = bf_get(lpfc_wcqe_c_status, wcqe);
354 result = wcqe->parameter;
355
356 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
357 if (tgtp) {
358 if (status) {
359 atomic_inc(&tgtp->xmt_ls_rsp_error);
360 if (result == IOERR_ABORT_REQUESTED)
361 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
362 if (bf_get(lpfc_wcqe_c_xb, wcqe))
363 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
364 } else {
365 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
366 }
367 }
368
369 finish:
370 __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
371 }
372
373 /**
374 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
375 * @phba: HBA buffer is associated with
376 * @ctx_buf: ctx buffer context
377 *
378 * Description: Frees the given DMA buffer by reposting it to its
379 * associated RQ so it can be reused.
380 *
381 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
382 *
383 * Returns: None
384 **/
385 void
386 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
387 {
388 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
389 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
390 struct lpfc_nvmet_tgtport *tgtp;
391 struct fc_frame_header *fc_hdr;
392 struct rqb_dmabuf *nvmebuf;
393 struct lpfc_nvmet_ctx_info *infop;
394 uint32_t size, oxid, sid;
395 int cpu;
396 unsigned long iflag;
397
398 if (ctxp->state == LPFC_NVME_STE_FREE) {
399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
400 "6411 NVMET free, already free IO x%x: %d %d\n",
401 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
402 }
403
404 if (ctxp->rqb_buffer) {
405 spin_lock_irqsave(&ctxp->ctxlock, iflag);
406 nvmebuf = ctxp->rqb_buffer;
407 /* check if freed in another path whilst acquiring lock */
408 if (nvmebuf) {
409 ctxp->rqb_buffer = NULL;
410 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
411 ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
412 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
413 nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
414 nvmebuf);
415 } else {
416 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
417 /* repost */
418 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
419 }
420 } else {
421 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
422 }
423 }
424 ctxp->state = LPFC_NVME_STE_FREE;
425
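/* If received commands are queued waiting for a free context, hand this
 * context directly to the oldest waiter rather than returning it to the
 * per-CPU free list.
 */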
426 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
427 if (phba->sli4_hba.nvmet_io_wait_cnt) {
428 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
429 nvmebuf, struct rqb_dmabuf,
430 hbuf.list);
431 phba->sli4_hba.nvmet_io_wait_cnt--;
432 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
433 iflag);
434
435 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
436 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
437 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
438 size = nvmebuf->bytes_recv;
439 sid = sli4_sid_from_fc_hdr(fc_hdr);
440
441 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
442 ctxp->wqeq = NULL;
443 ctxp->offset = 0;
444 ctxp->phba = phba;
445 ctxp->size = size;
446 ctxp->oxid = oxid;
447 ctxp->sid = sid;
448 ctxp->state = LPFC_NVME_STE_RCV;
449 ctxp->entry_cnt = 1;
450 ctxp->flag = 0;
451 ctxp->ctxbuf = ctx_buf;
452 ctxp->rqb_buffer = (void *)nvmebuf;
453 spin_lock_init(&ctxp->ctxlock);
454
455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
456 /* NOTE: isr time stamp is stale when context is re-assigned */
457 if (ctxp->ts_isr_cmd) {
458 ctxp->ts_cmd_nvme = 0;
459 ctxp->ts_nvme_data = 0;
460 ctxp->ts_data_wqput = 0;
461 ctxp->ts_isr_data = 0;
462 ctxp->ts_data_nvme = 0;
463 ctxp->ts_nvme_status = 0;
464 ctxp->ts_status_wqput = 0;
465 ctxp->ts_isr_status = 0;
466 ctxp->ts_status_nvme = 0;
467 }
468 #endif
469 atomic_inc(&tgtp->rcv_fcp_cmd_in);
470
471 /* Indicate that a replacement buffer has been posted */
472 spin_lock_irqsave(&ctxp->ctxlock, iflag);
473 ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
474 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
475
476 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
477 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
478 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
479 "6181 Unable to queue deferred work "
480 "for oxid x%x. "
481 "FCP Drop IO [x%x x%x x%x]\n",
482 ctxp->oxid,
483 atomic_read(&tgtp->rcv_fcp_cmd_in),
484 atomic_read(&tgtp->rcv_fcp_cmd_out),
485 atomic_read(&tgtp->xmt_fcp_release));
486
487 spin_lock_irqsave(&ctxp->ctxlock, iflag);
488 lpfc_nvmet_defer_release(phba, ctxp);
489 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
490 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
491 }
492 return;
493 }
494 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
495
496 /*
497 * Use the CPU context list, from the MRQ the IO was received on
498 * (ctxp->idx), to save context structure.
499 */
500 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
501 list_del_init(&ctxp->list);
502 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
503 cpu = raw_smp_processor_id();
504 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
505 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
506 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
507 infop->nvmet_ctx_list_cnt++;
508 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
509 #endif
510 }
511
512 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
513 static void
514 lpfc_nvmet_ktime(struct lpfc_hba *phba,
515 struct lpfc_async_xchg_ctx *ctxp)
516 {
517 uint64_t seg1, seg2, seg3, seg4, seg5;
518 uint64_t seg6, seg7, seg8, seg9, seg10;
519 uint64_t segsum;
520
521 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
522 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
523 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
524 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
525 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
526 return;
527
528 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
529 return;
530 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
531 return;
532 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
533 return;
534 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
535 return;
536 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
537 return;
538 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
539 return;
540 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
541 return;
542 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
543 return;
544 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
545 return;
546 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
547 return;
548 /*
549 * Segment 1 - Time from FCP command received by MSI-X ISR
550 * to FCP command is passed to NVME Layer.
551 * Segment 2 - Time from FCP command payload handed
552 * off to NVME Layer to Driver receives a Command op
553 * from NVME Layer.
554 * Segment 3 - Time from Driver receives a Command op
555 * from NVME Layer to Command is put on WQ.
556 * Segment 4 - Time from Driver WQ put is done
557 * to MSI-X ISR for Command cmpl.
558 * Segment 5 - Time from MSI-X ISR for Command cmpl to
559 * Command cmpl is passed to NVME Layer.
560 * Segment 6 - Time from Command cmpl is passed to NVME
561 * Layer to Driver receives a RSP op from NVME Layer.
562 * Segment 7 - Time from Driver receives a RSP op from
563 * NVME Layer to WQ put is done on TRSP FCP Status.
564 * Segment 8 - Time from Driver WQ put is done on TRSP
565 * FCP Status to MSI-X ISR for TRSP cmpl.
566 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
567 * TRSP cmpl is passed to NVME Layer.
568 * Segment 10 - Time from FCP command received by
569 * MSI-X ISR to command is completed on wire.
570 * (Segments 1 thru 8) for READDATA / WRITEDATA
571 * (Segments 1 thru 4) for READDATA_RSP
572 */
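/*
 * Each ts_* field is an absolute timestamp. Every segment below is first
 * computed as a delta from ts_isr_cmd; the running total of the earlier
 * segments (segsum) is then subtracted to isolate that stage. Any
 * negative intermediate result abandons the sample.
 */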
573 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
574 segsum = seg1;
575
576 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
577 if (segsum > seg2)
578 return;
579 seg2 -= segsum;
580 segsum += seg2;
581
582 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
583 if (segsum > seg3)
584 return;
585 seg3 -= segsum;
586 segsum += seg3;
587
588 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
589 if (segsum > seg4)
590 return;
591 seg4 -= segsum;
592 segsum += seg4;
593
594 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
595 if (segsum > seg5)
596 return;
597 seg5 -= segsum;
598 segsum += seg5;
599
600
601 /* For auto rsp commands seg6 thru seg10 will be 0 */
602 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
603 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
604 if (segsum > seg6)
605 return;
606 seg6 -= segsum;
607 segsum += seg6;
608
609 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
610 if (segsum > seg7)
611 return;
612 seg7 -= segsum;
613 segsum += seg7;
614
615 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
616 if (segsum > seg8)
617 return;
618 seg8 -= segsum;
619 segsum += seg8;
620
621 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
622 if (segsum > seg9)
623 return;
624 seg9 -= segsum;
625 segsum += seg9;
626
627 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
628 return;
629 seg10 = (ctxp->ts_isr_status -
630 ctxp->ts_isr_cmd);
631 } else {
632 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
633 return;
634 seg6 = 0;
635 seg7 = 0;
636 seg8 = 0;
637 seg9 = 0;
638 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
639 }
640
641 phba->ktime_seg1_total += seg1;
642 if (seg1 < phba->ktime_seg1_min)
643 phba->ktime_seg1_min = seg1;
644 else if (seg1 > phba->ktime_seg1_max)
645 phba->ktime_seg1_max = seg1;
646
647 phba->ktime_seg2_total += seg2;
648 if (seg2 < phba->ktime_seg2_min)
649 phba->ktime_seg2_min = seg2;
650 else if (seg2 > phba->ktime_seg2_max)
651 phba->ktime_seg2_max = seg2;
652
653 phba->ktime_seg3_total += seg3;
654 if (seg3 < phba->ktime_seg3_min)
655 phba->ktime_seg3_min = seg3;
656 else if (seg3 > phba->ktime_seg3_max)
657 phba->ktime_seg3_max = seg3;
658
659 phba->ktime_seg4_total += seg4;
660 if (seg4 < phba->ktime_seg4_min)
661 phba->ktime_seg4_min = seg4;
662 else if (seg4 > phba->ktime_seg4_max)
663 phba->ktime_seg4_max = seg4;
664
665 phba->ktime_seg5_total += seg5;
666 if (seg5 < phba->ktime_seg5_min)
667 phba->ktime_seg5_min = seg5;
668 else if (seg5 > phba->ktime_seg5_max)
669 phba->ktime_seg5_max = seg5;
670
671 phba->ktime_data_samples++;
672 if (!seg6)
673 goto out;
674
675 phba->ktime_seg6_total += seg6;
676 if (seg6 < phba->ktime_seg6_min)
677 phba->ktime_seg6_min = seg6;
678 else if (seg6 > phba->ktime_seg6_max)
679 phba->ktime_seg6_max = seg6;
680
681 phba->ktime_seg7_total += seg7;
682 if (seg7 < phba->ktime_seg7_min)
683 phba->ktime_seg7_min = seg7;
684 else if (seg7 > phba->ktime_seg7_max)
685 phba->ktime_seg7_max = seg7;
686
687 phba->ktime_seg8_total += seg8;
688 if (seg8 < phba->ktime_seg8_min)
689 phba->ktime_seg8_min = seg8;
690 else if (seg8 > phba->ktime_seg8_max)
691 phba->ktime_seg8_max = seg8;
692
693 phba->ktime_seg9_total += seg9;
694 if (seg9 < phba->ktime_seg9_min)
695 phba->ktime_seg9_min = seg9;
696 else if (seg9 > phba->ktime_seg9_max)
697 phba->ktime_seg9_max = seg9;
698 out:
699 phba->ktime_seg10_total += seg10;
700 if (seg10 < phba->ktime_seg10_min)
701 phba->ktime_seg10_min = seg10;
702 else if (seg10 > phba->ktime_seg10_max)
703 phba->ktime_seg10_max = seg10;
704 phba->ktime_status_samples++;
705 }
706 #endif
707
708 /**
709 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
710 * @phba: Pointer to HBA context object.
711 * @cmdwqe: Pointer to driver command WQE object.
712 * @rspwqe: Pointer to driver response WQE object.
713 *
714 * The function is called from SLI ring event handler with no
715 * lock held. This function is the completion handler for NVME FCP commands.
716 * The function frees memory resources used for the NVME commands.
717 **/
718 static void
719 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
720 struct lpfc_iocbq *rspwqe)
721 {
722 struct lpfc_nvmet_tgtport *tgtp;
723 struct nvmefc_tgt_fcp_req *rsp;
724 struct lpfc_async_xchg_ctx *ctxp;
725 uint32_t status, result, op, logerr;
726 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
727 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
728 int id;
729 #endif
730
731 ctxp = cmdwqe->context_un.axchg;
732 ctxp->flag &= ~LPFC_NVME_IO_INP;
733
734 rsp = &ctxp->hdlrctx.fcp_req;
735 op = rsp->op;
736
737 status = bf_get(lpfc_wcqe_c_status, wcqe);
738 result = wcqe->parameter;
739
740 if (phba->targetport)
741 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
742 else
743 tgtp = NULL;
744
745 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
746 ctxp->oxid, op, status);
747
748 if (status) {
749 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
750 rsp->transferred_length = 0;
751 if (tgtp) {
752 atomic_inc(&tgtp->xmt_fcp_rsp_error);
753 if (result == IOERR_ABORT_REQUESTED)
754 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
755 }
756
757 logerr = LOG_NVME_IOERR;
758
759 /* pick up SLI4 exchange busy condition */
760 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
761 ctxp->flag |= LPFC_NVME_XBUSY;
762 logerr |= LOG_NVME_ABTS;
763 if (tgtp)
764 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
765
766 } else {
767 ctxp->flag &= ~LPFC_NVME_XBUSY;
768 }
769
770 lpfc_printf_log(phba, KERN_INFO, logerr,
771 "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
772 "XBUSY:x%x\n",
773 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
774 status, result, ctxp->flag);
775
776 } else {
777 rsp->fcp_error = NVME_SC_SUCCESS;
778 if (op == NVMET_FCOP_RSP)
779 rsp->transferred_length = rsp->rsplen;
780 else
781 rsp->transferred_length = rsp->transfer_length;
782 if (tgtp)
783 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
784 }
785
786 if ((op == NVMET_FCOP_READDATA_RSP) ||
787 (op == NVMET_FCOP_RSP)) {
788 /* Sanity check */
789 ctxp->state = LPFC_NVME_STE_DONE;
790 ctxp->entry_cnt++;
791
792 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
793 if (ctxp->ts_cmd_nvme) {
794 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
795 ctxp->ts_isr_data =
796 cmdwqe->isr_timestamp;
797 ctxp->ts_data_nvme =
798 ktime_get_ns();
799 ctxp->ts_nvme_status =
800 ctxp->ts_data_nvme;
801 ctxp->ts_status_wqput =
802 ctxp->ts_data_nvme;
803 ctxp->ts_isr_status =
804 ctxp->ts_data_nvme;
805 ctxp->ts_status_nvme =
806 ctxp->ts_data_nvme;
807 } else {
808 ctxp->ts_isr_status =
809 cmdwqe->isr_timestamp;
810 ctxp->ts_status_nvme =
811 ktime_get_ns();
812 }
813 }
814 #endif
815 rsp->done(rsp);
816 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
817 if (ctxp->ts_cmd_nvme)
818 lpfc_nvmet_ktime(phba, ctxp);
819 #endif
820 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
821 } else {
822 ctxp->entry_cnt++;
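/* Zero the iocbq from cmd_flag through the end of the structure so it
 * can be reused for the next WQE on this exchange (e.g. the TRSP).
 */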
823 memset_startat(cmdwqe, 0, cmd_flag);
824 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
825 if (ctxp->ts_cmd_nvme) {
826 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
827 ctxp->ts_data_nvme = ktime_get_ns();
828 }
829 #endif
830 rsp->done(rsp);
831 }
832 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
833 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
834 id = raw_smp_processor_id();
835 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
836 if (ctxp->cpu != id)
837 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
838 "6704 CPU Check cmdcmpl: "
839 "cpu %d expect %d\n",
840 id, ctxp->cpu);
841 }
842 #endif
843 }
844
845 /**
846 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
847 * an NVME LS rsp for a prior NVME LS request that was received.
848 * @axchg: pointer to exchange context for the NVME LS request the response
849 * is for.
850 * @ls_rsp: pointer to the transport LS RSP that is to be sent
851 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
852 *
853 * This routine is used to format and send a WQE to transmit a NVME LS
854 * Response. The response is for a prior NVME LS request that was
855 * received and posted to the transport.
856 *
857 * Returns:
858 * 0 : if response successfully transmitted
859 * non-zero : if response failed to transmit, of the form -Exxx.
860 **/
861 int
862 __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
863 struct nvmefc_ls_rsp *ls_rsp,
864 void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
865 struct lpfc_iocbq *cmdwqe,
866 struct lpfc_iocbq *rspwqe))
867 {
868 struct lpfc_hba *phba = axchg->phba;
869 struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
870 struct lpfc_iocbq *nvmewqeq;
871 struct lpfc_dmabuf dmabuf;
872 struct ulp_bde64 bpl;
873 int rc;
874
875 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
876 return -ENODEV;
877
878 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
879 "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
880
881 if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
883 "6412 NVMEx LS rsp state mismatch "
884 "oxid x%x: %d %d\n",
885 axchg->oxid, axchg->state, axchg->entry_cnt);
886 return -EALREADY;
887 }
888 axchg->state = LPFC_NVME_STE_LS_RSP;
889 axchg->entry_cnt++;
890
891 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
892 ls_rsp->rsplen);
893 if (nvmewqeq == NULL) {
894 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895 "6150 NVMEx LS Drop Rsp x%x: Prep\n",
896 axchg->oxid);
897 rc = -ENOMEM;
898 goto out_free_buf;
899 }
900
901 /* Save numBdes for bpl2sgl */
902 nvmewqeq->num_bdes = 1;
903 nvmewqeq->hba_wqidx = 0;
904 nvmewqeq->bpl_dmabuf = &dmabuf;
905 dmabuf.virt = &bpl;
906 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
907 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
908 bpl.tus.f.bdeSize = ls_rsp->rsplen;
909 bpl.tus.f.bdeFlags = 0;
910 bpl.tus.w = le32_to_cpu(bpl.tus.w);
911 /*
912 * Note: although we're using stack space for the dmabuf, the
913 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
914 * be referenced after it returns back to this routine.
915 */
916
917 nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
918 nvmewqeq->context_un.axchg = axchg;
919
920 lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
921 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
922
923 rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
924
925 /* clear to be sure there's no reference */
926 nvmewqeq->bpl_dmabuf = NULL;
927
928 if (rc == WQE_SUCCESS) {
929 /*
930 * Okay to repost buffer here, but wait till cmpl
931 * before freeing ctxp and iocbq.
932 */
933 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
934 return 0;
935 }
936
937 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
938 "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
939 axchg->oxid, rc);
940
941 rc = -ENXIO;
942
943 lpfc_nlp_put(nvmewqeq->ndlp);
944
945 out_free_buf:
946 /* Give back resources */
947 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
948
949 /*
950 * As transport doesn't track completions of responses, if the rsp
951 * fails to send, the transport will effectively ignore the rsp
952 * and consider the LS done. However, the driver has an active
953 * exchange open for the LS - so be sure to abort the exchange
954 * if the response isn't sent.
955 */
956 lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
957 return rc;
958 }
959
960 /**
961 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
962 * @tgtport: pointer to target port that NVME LS is to be transmit from.
963 * @ls_rsp: pointer to the transport LS RSP that is to be sent
964 *
965 * Driver registers this routine to transmit responses for received NVME
966 * LS requests.
967 *
968 * This routine is used to format and send a WQE to transmit a NVME LS
969 * Response. The ls_rsp is used to reverse-map the LS to the original
970 * NVME LS request sequence, which provides addressing information for
971 * the remote port the LS is to be sent to, as well as the exchange id
972 * that the LS is bound to.
973 *
974 * Returns:
975 * 0 : if response successfully transmitted
976 * non-zero : if response failed to transmit, of the form -Exxx.
977 **/
978 static int
979 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
980 struct nvmefc_ls_rsp *ls_rsp)
981 {
982 struct lpfc_async_xchg_ctx *axchg =
983 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
984 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
985 int rc;
986
987 if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag))
988 return -ENODEV;
989
990 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
991
992 if (rc) {
993 atomic_inc(&nvmep->xmt_ls_drop);
994 /*
995 * unless the failure is due to having already sent
996 * the response, an abort will be generated for the
997 * exchange if the rsp can't be sent.
998 */
999 if (rc != -EALREADY)
1000 atomic_inc(&nvmep->xmt_ls_abort);
1001 return rc;
1002 }
1003
1004 atomic_inc(&nvmep->xmt_ls_rsp);
1005 return 0;
1006 }
1007
1008 static int
1009 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
1010 struct nvmefc_tgt_fcp_req *rsp)
1011 {
1012 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1013 struct lpfc_async_xchg_ctx *ctxp =
1014 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1015 struct lpfc_hba *phba = ctxp->phba;
1016 struct lpfc_queue *wq;
1017 struct lpfc_iocbq *nvmewqeq;
1018 struct lpfc_sli_ring *pring;
1019 unsigned long iflags;
1020 int rc;
1021 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1022 int id;
1023 #endif
1024
1025 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
1026 rc = -ENODEV;
1027 goto aerr;
1028 }
1029
1030 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1031 if (ctxp->ts_cmd_nvme) {
1032 if (rsp->op == NVMET_FCOP_RSP)
1033 ctxp->ts_nvme_status = ktime_get_ns();
1034 else
1035 ctxp->ts_nvme_data = ktime_get_ns();
1036 }
1037
1038 /* Setup the hdw queue if not already set */
1039 if (!ctxp->hdwq)
1040 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1041
1042 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
1043 id = raw_smp_processor_id();
1044 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1045 if (rsp->hwqid != id)
1046 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1047 "6705 CPU Check OP: "
1048 "cpu %d expect %d\n",
1049 id, rsp->hwqid);
1050 ctxp->cpu = id; /* Setup cpu for cmpl check */
1051 }
1052 #endif
1053
1054 /* Sanity check */
1055 if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1056 (ctxp->state == LPFC_NVME_STE_ABORT)) {
1057 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1058 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1059 "6102 IO oxid x%x aborted\n",
1060 ctxp->oxid);
1061 rc = -ENXIO;
1062 goto aerr;
1063 }
1064
1065 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1066 if (nvmewqeq == NULL) {
1067 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1069 "6152 FCP Drop IO x%x: Prep\n",
1070 ctxp->oxid);
1071 rc = -ENXIO;
1072 goto aerr;
1073 }
1074
1075 nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1076 nvmewqeq->context_un.axchg = ctxp;
1077 nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
1078 ctxp->wqeq->hba_wqidx = rsp->hwqid;
1079
1080 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1081 ctxp->oxid, rsp->op, rsp->rsplen);
1082
1083 ctxp->flag |= LPFC_NVME_IO_INP;
1084 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1085 if (rc == WQE_SUCCESS) {
1086 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1087 if (!ctxp->ts_cmd_nvme)
1088 return 0;
1089 if (rsp->op == NVMET_FCOP_RSP)
1090 ctxp->ts_status_wqput = ktime_get_ns();
1091 else
1092 ctxp->ts_data_wqput = ktime_get_ns();
1093 #endif
1094 return 0;
1095 }
1096
1097 if (rc == -EBUSY) {
1098 /*
1099 * WQ was full, so queue nvmewqeq to be sent after
1100 * WQE release CQE
1101 */
1102 ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1103 wq = ctxp->hdwq->io_wq;
1104 pring = wq->pring;
1105 spin_lock_irqsave(&pring->ring_lock, iflags);
1106 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1107 wq->q_flag |= HBA_NVMET_WQFULL;
1108 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1109 atomic_inc(&lpfc_nvmep->defer_wqfull);
1110 return 0;
1111 }
1112
1113 /* Give back resources */
1114 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1116 "6153 FCP Drop IO x%x: Issue: %d\n",
1117 ctxp->oxid, rc);
1118
1119 ctxp->wqeq->hba_wqidx = 0;
1120 nvmewqeq->context_un.axchg = NULL;
1121 nvmewqeq->bpl_dmabuf = NULL;
1122 rc = -EBUSY;
1123 aerr:
1124 return rc;
1125 }
1126
1127 static void
1128 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1129 {
1130 struct lpfc_nvmet_tgtport *tport = targetport->private;
1131
1132 /* release any threads waiting for the unreg to complete */
1133 if (tport->phba->targetport)
1134 complete(tport->tport_unreg_cmp);
1135 }
1136
1137 static void
1138 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1139 struct nvmefc_tgt_fcp_req *req)
1140 {
1141 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1142 struct lpfc_async_xchg_ctx *ctxp =
1143 container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1144 struct lpfc_hba *phba = ctxp->phba;
1145 struct lpfc_queue *wq;
1146 unsigned long flags;
1147
1148 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
1149 return;
1150
1151 if (!ctxp->hdwq)
1152 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1153
1154 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1155 "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1156 ctxp->oxid, ctxp->flag, ctxp->state);
1157
1158 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1159 ctxp->oxid, ctxp->flag, ctxp->state);
1160
1161 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1162
1163 spin_lock_irqsave(&ctxp->ctxlock, flags);
1164
1165 /* Since iaab/iaar are NOT set, we need to check
1166 * if the firmware is in the process of aborting this IO
1167 */
1168 if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1169 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1170 return;
1171 }
1172 ctxp->flag |= LPFC_NVME_ABORT_OP;
1173
1174 if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1175 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1176 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1177 ctxp->oxid);
1178 wq = ctxp->hdwq->io_wq;
1179 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1180 return;
1181 }
1182 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1183
1184 /* A state of LPFC_NVME_STE_RCV means we have just received
1185 * the NVME command and have not started processing it.
1186 * (by issuing any IO WQEs on this exchange yet)
1187 */
1188 if (ctxp->state == LPFC_NVME_STE_RCV)
1189 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1190 ctxp->oxid);
1191 else
1192 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1193 ctxp->oxid);
1194 }
1195
1196 static void
1197 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1198 struct nvmefc_tgt_fcp_req *rsp)
1199 {
1200 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1201 struct lpfc_async_xchg_ctx *ctxp =
1202 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1203 struct lpfc_hba *phba = ctxp->phba;
1204 unsigned long flags;
1205 bool aborting = false;
1206
1207 spin_lock_irqsave(&ctxp->ctxlock, flags);
1208 if (ctxp->flag & LPFC_NVME_XBUSY)
1209 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1210 "6027 NVMET release with XBUSY flag x%x"
1211 " oxid x%x\n",
1212 ctxp->flag, ctxp->oxid);
1213 else if (ctxp->state != LPFC_NVME_STE_DONE &&
1214 ctxp->state != LPFC_NVME_STE_ABORT)
1215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1216 "6413 NVMET release bad state %d %d oxid x%x\n",
1217 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1218
1219 if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1220 (ctxp->flag & LPFC_NVME_XBUSY)) {
1221 aborting = true;
1222 /* let the abort path do the real release */
1223 lpfc_nvmet_defer_release(phba, ctxp);
1224 }
1225 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1226
1227 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1228 ctxp->state, aborting);
1229
1230 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1231 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1232
1233 if (aborting)
1234 return;
1235
1236 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1237 }
1238
1239 static void
1240 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1241 struct nvmefc_tgt_fcp_req *rsp)
1242 {
1243 struct lpfc_nvmet_tgtport *tgtp;
1244 struct lpfc_async_xchg_ctx *ctxp =
1245 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1246 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1247 struct lpfc_hba *phba = ctxp->phba;
1248 unsigned long iflag;
1249
1250
1251 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1252 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1253
1254 if (!nvmebuf) {
1255 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1256 "6425 Defer rcv: no buffer oxid x%x: "
1257 "flg %x ste %x\n",
1258 ctxp->oxid, ctxp->flag, ctxp->state);
1259 return;
1260 }
1261
1262 tgtp = phba->targetport->private;
1263 if (tgtp)
1264 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1265
1266 /* Free the nvmebuf since a new buffer already replaced it */
1267 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1268 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1269 ctxp->rqb_buffer = NULL;
1270 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1271 }
1272
1273 /**
1274 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
1275 * @phba: Pointer to HBA context object
1276 * @cmdwqe: Pointer to driver command WQE object.
1277 * @rspwqe: Pointer to driver response WQE object.
1278 *
1279 * This function is the completion handler for NVME LS requests.
1280 * The function updates any states and statistics, then calls the
1281 * generic completion handler to finish completion of the request.
1282 **/
1283 static void
1284 lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1285 struct lpfc_iocbq *rspwqe)
1286 {
1287 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
1288 __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
1289 }
1290
1291 /**
1292 * lpfc_nvmet_ls_req - Issue a Link Service request
1293 * @targetport: pointer to target instance registered with nvmet transport.
1294 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1295 * Driver sets this value to the ndlp pointer.
1296 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
1297 *
1298 * Driver registers this routine to handle any link service request
1299 * from the nvme_fc transport to a remote nvme-aware port.
1300 *
1301 * Return value :
1302 * 0 - Success
1303 * non-zero: various error codes, in form of -Exxx
1304 **/
1305 static int
1306 lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
1307 void *hosthandle,
1308 struct nvmefc_ls_req *pnvme_lsreq)
1309 {
1310 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1311 struct lpfc_hba *phba;
1312 struct lpfc_nodelist *ndlp;
1313 int ret;
1314 u32 hstate;
1315
1316 if (!lpfc_nvmet)
1317 return -EINVAL;
1318
1319 phba = lpfc_nvmet->phba;
1320 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
1321 return -EINVAL;
1322
1323 hstate = atomic_read(&lpfc_nvmet->state);
1324 if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
1325 return -EACCES;
1326
1327 ndlp = (struct lpfc_nodelist *)hosthandle;
1328
1329 ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
1330 lpfc_nvmet_ls_req_cmp);
1331
1332 return ret;
1333 }
1334
1335 /**
1336 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
1337 * @targetport: Transport targetport, that LS was issued from.
1338 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1339 * Driver sets this value to the ndlp pointer.
1340 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
1341 *
1342 * Driver registers this routine to abort an NVME LS request that is
1343 * in progress (from the transport's perspective).
1344 **/
1345 static void
1346 lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
1347 void *hosthandle,
1348 struct nvmefc_ls_req *pnvme_lsreq)
1349 {
1350 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1351 struct lpfc_hba *phba;
1352 struct lpfc_nodelist *ndlp;
1353 int ret;
1354
1355 phba = lpfc_nvmet->phba;
1356 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
1357 return;
1358
1359 ndlp = (struct lpfc_nodelist *)hosthandle;
1360
1361 ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
1362 if (!ret)
1363 atomic_inc(&lpfc_nvmet->xmt_ls_abort);
1364 }
1365
1366 static void
1367 lpfc_nvmet_host_release(void *hosthandle)
1368 {
1369 struct lpfc_nodelist *ndlp = hosthandle;
1370 struct lpfc_hba *phba = ndlp->phba;
1371 struct lpfc_nvmet_tgtport *tgtp;
1372
1373 if (!phba->targetport || !phba->targetport->private)
1374 return;
1375
1376 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1377 "6202 NVMET XPT releasing hosthandle x%px "
1378 "DID x%x xflags x%x refcnt %d\n",
1379 hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
1380 kref_read(&ndlp->kref));
1381 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382 spin_lock_irq(&ndlp->lock);
1383 ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
1384 spin_unlock_irq(&ndlp->lock);
1385 lpfc_nlp_put(ndlp);
1386 atomic_set(&tgtp->state, 0);
1387 }
1388
1389 static void
1390 lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1391 {
1392 struct lpfc_nvmet_tgtport *tgtp;
1393 struct lpfc_hba *phba;
1394 uint32_t rc;
1395
1396 tgtp = tgtport->private;
1397 phba = tgtp->phba;
1398
1399 rc = lpfc_issue_els_rscn(phba->pport, 0);
1400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1401 "6420 NVMET subsystem change: Notification %s\n",
1402 (rc) ? "Failed" : "Sent");
1403 }
1404
1405 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1406 .targetport_delete = lpfc_nvmet_targetport_delete,
1407 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1408 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1409 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1410 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1411 .defer_rcv = lpfc_nvmet_defer_rcv,
1412 .discovery_event = lpfc_nvmet_discovery_event,
1413 .ls_req = lpfc_nvmet_ls_req,
1414 .ls_abort = lpfc_nvmet_ls_abort,
1415 .host_release = lpfc_nvmet_host_release,
1416
1417 .max_hw_queues = 1,
1418 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1419 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1420 .dma_boundary = 0xFFFFFFFF,
1421
1422 /* optional features */
1423 .target_features = 0,
1424 /* sizes of additional private data for data structures */
1425 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1426 .lsrqst_priv_sz = 0,
1427 };
1428
1429 static void
1430 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1431 struct lpfc_nvmet_ctx_info *infop)
1432 {
1433 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1434 unsigned long flags;
1435
1436 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1437 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1438 &infop->nvmet_ctx_list, list) {
1439 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1440 list_del_init(&ctx_buf->list);
1441 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1442
1443 spin_lock(&phba->hbalock);
1444 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1445 spin_unlock(&phba->hbalock);
1446
1447 ctx_buf->sglq->state = SGL_FREED;
1448 ctx_buf->sglq->ndlp = NULL;
1449
1450 spin_lock(&phba->sli4_hba.sgl_list_lock);
1451 list_add_tail(&ctx_buf->sglq->list,
1452 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1453 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1454
1455 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1456 kfree(ctx_buf->context);
1457 }
1458 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1459 }
1460
1461 static void
1462 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1463 {
1464 struct lpfc_nvmet_ctx_info *infop;
1465 int i, j;
1466
1467 /* The first context list, MRQ 0 CPU 0 */
1468 infop = phba->sli4_hba.nvmet_ctx_info;
1469 if (!infop)
1470 return;
1471
1472 /* Cycle the entire CPU context list for every MRQ */
1473 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1474 for_each_present_cpu(j) {
1475 infop = lpfc_get_ctx_list(phba, j, i);
1476 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1477 }
1478 }
1479 kfree(phba->sli4_hba.nvmet_ctx_info);
1480 phba->sli4_hba.nvmet_ctx_info = NULL;
1481 }
1482
1483 static int
1484 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1485 {
1486 struct lpfc_nvmet_ctxbuf *ctx_buf;
1487 struct lpfc_iocbq *nvmewqe;
1488 union lpfc_wqe128 *wqe;
1489 struct lpfc_nvmet_ctx_info *last_infop;
1490 struct lpfc_nvmet_ctx_info *infop;
1491 int i, j, idx, cpu;
1492
1493 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1494 "6403 Allocate NVMET resources for %d XRIs\n",
1495 phba->sli4_hba.nvmet_xri_cnt);
1496
1497 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1498 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1499 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1500 if (!phba->sli4_hba.nvmet_ctx_info) {
1501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1502 "6419 Failed allocate memory for "
1503 "nvmet context lists\n");
1504 return -ENOMEM;
1505 }
1506
1507 /*
1508 * Assuming X CPUs in the system, and Y MRQs, allocate some
1509 * lpfc_nvmet_ctx_info structures as follows:
1510 *
1511 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1512 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1513 * ...
1514 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1515 *
1516 * Each line represents a MRQ "silo" containing an entry for
1517 * every CPU.
1518 *
1519 * MRQ X is initially assumed to be associated with CPU X, thus
1520 * contexts are initially distributed across all MRQs using
1521 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1522 * freed, they are freed to the MRQ silo based on the CPU number
1523 * of the IO completion. Thus a context that was allocated for MRQ A
1524 * whose IO completed on CPU B will be freed to cpuB/mrqA.
1525 */
1526 for_each_possible_cpu(i) {
1527 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1528 infop = lpfc_get_ctx_list(phba, i, j);
1529 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1530 spin_lock_init(&infop->nvmet_ctx_list_lock);
1531 infop->nvmet_ctx_list_cnt = 0;
1532 }
1533 }
1534
1535 /*
1536 * Setup the next CPU context info ptr for each MRQ.
1537 * MRQ 0 will cycle thru CPUs 0 - X separately from
1538 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1539 */
1540 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1541 last_infop = lpfc_get_ctx_list(phba,
1542 cpumask_first(cpu_present_mask),
1543 j);
1544 for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
1545 infop = lpfc_get_ctx_list(phba, i, j);
1546 infop->nvmet_ctx_next_cpu = last_infop;
1547 last_infop = infop;
1548 }
1549 }
1550
1551 /* For all nvmet xris, allocate resources needed to process a
1552 * received command on a per xri basis.
1553 */
1554 idx = 0;
1555 cpu = cpumask_first(cpu_present_mask);
1556 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1557 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1558 if (!ctx_buf) {
1559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1560 "6404 Ran out of memory for NVMET\n");
1561 return -ENOMEM;
1562 }
1563
1564 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1565 GFP_KERNEL);
1566 if (!ctx_buf->context) {
1567 kfree(ctx_buf);
1568 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1569 "6405 Ran out of NVMET "
1570 "context memory\n");
1571 return -ENOMEM;
1572 }
1573 ctx_buf->context->ctxbuf = ctx_buf;
1574 ctx_buf->context->state = LPFC_NVME_STE_FREE;
1575
1576 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1577 if (!ctx_buf->iocbq) {
1578 kfree(ctx_buf->context);
1579 kfree(ctx_buf);
1580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1581 "6406 Ran out of NVMET iocb/WQEs\n");
1582 return -ENOMEM;
1583 }
1584 ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
1585 nvmewqe = ctx_buf->iocbq;
1586 wqe = &nvmewqe->wqe;
1587
1588 /* Initialize WQE */
1589 memset(wqe, 0, sizeof(*wqe));
1590
1591 ctx_buf->iocbq->cmd_dmabuf = NULL;
1592 spin_lock(&phba->sli4_hba.sgl_list_lock);
1593 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1594 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1595 if (!ctx_buf->sglq) {
1596 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1597 kfree(ctx_buf->context);
1598 kfree(ctx_buf);
1599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1600 "6407 Ran out of NVMET XRIs\n");
1601 return -ENOMEM;
1602 }
1603 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1604
1605 /*
1606 * Add ctx to MRQidx context list. Our initial assumption
1607 * is MRQidx will be associated with CPUidx. This association
1608 * can change on the fly.
1609 */
1610 infop = lpfc_get_ctx_list(phba, cpu, idx);
1611 spin_lock(&infop->nvmet_ctx_list_lock);
1612 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1613 infop->nvmet_ctx_list_cnt++;
1614 spin_unlock(&infop->nvmet_ctx_list_lock);
1615
1616 /* Spread ctx structures evenly across all MRQs */
1617 idx++;
1618 if (idx >= phba->cfg_nvmet_mrq) {
1619 idx = 0;
1620 cpu = cpumask_first(cpu_present_mask);
1621 continue;
1622 }
1623 cpu = lpfc_next_present_cpu(cpu);
1624 }
1625
1626 for_each_present_cpu(i) {
1627 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1628 infop = lpfc_get_ctx_list(phba, i, j);
1629 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1630 "6408 TOTAL NVMET ctx for CPU %d "
1631 "MRQ %d: cnt %d nextcpu x%px\n",
1632 i, j, infop->nvmet_ctx_list_cnt,
1633 infop->nvmet_ctx_next_cpu);
1634 }
1635 }
1636 return 0;
1637 }
1638
1639 int
1640 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1641 {
1642 struct lpfc_vport *vport = phba->pport;
1643 struct lpfc_nvmet_tgtport *tgtp;
1644 struct nvmet_fc_port_info pinfo;
1645 int error;
1646
1647 if (phba->targetport)
1648 return 0;
1649
1650 error = lpfc_nvmet_setup_io_context(phba);
1651 if (error)
1652 return error;
1653
1654 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1655 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1656 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1657 pinfo.port_id = vport->fc_myDID;
1658
1659 /* We need to tell the transport layer + 1 because it takes page
1660 * alignment into account. When space for the SGL is allocated we
1661 * allocate + 3: one for cmd, one for rsp, and one for this alignment
1662 */
1663 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1664 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1665 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1666
1667 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1668 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1669 &phba->pcidev->dev,
1670 &phba->targetport);
1671 #else
1672 error = -ENOENT;
1673 #endif
1674 if (error) {
1675 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1676 "6025 Cannot register NVME targetport x%x: "
1677 "portnm %llx nodenm %llx segs %d qs %d\n",
1678 error,
1679 pinfo.port_name, pinfo.node_name,
1680 lpfc_tgttemplate.max_sgl_segments,
1681 lpfc_tgttemplate.max_hw_queues);
1682 phba->targetport = NULL;
1683 phba->nvmet_support = 0;
1684
1685 lpfc_nvmet_cleanup_io_context(phba);
1686
1687 } else {
1688 tgtp = (struct lpfc_nvmet_tgtport *)
1689 phba->targetport->private;
1690 tgtp->phba = phba;
1691
1692 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1693 "6026 Registered NVME "
1694 "targetport: x%px, private x%px "
1695 "portnm %llx nodenm %llx segs %d qs %d\n",
1696 phba->targetport, tgtp,
1697 pinfo.port_name, pinfo.node_name,
1698 lpfc_tgttemplate.max_sgl_segments,
1699 lpfc_tgttemplate.max_hw_queues);
1700
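		/* Zero the targetport statistics counters; these atomics are
		 * bumped throughout the NVMET LS/FCP paths and are reported
		 * through the driver's debugfs statistics output.
		 */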
1701 atomic_set(&tgtp->rcv_ls_req_in, 0);
1702 atomic_set(&tgtp->rcv_ls_req_out, 0);
1703 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1704 atomic_set(&tgtp->xmt_ls_abort, 0);
1705 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1706 atomic_set(&tgtp->xmt_ls_rsp, 0);
1707 atomic_set(&tgtp->xmt_ls_drop, 0);
1708 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1709 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1710 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1711 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1712 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1713 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1714 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1715 atomic_set(&tgtp->xmt_fcp_drop, 0);
1716 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1717 atomic_set(&tgtp->xmt_fcp_read, 0);
1718 atomic_set(&tgtp->xmt_fcp_write, 0);
1719 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1720 atomic_set(&tgtp->xmt_fcp_release, 0);
1721 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1722 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1723 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1724 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1725 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1726 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1727 atomic_set(&tgtp->xmt_fcp_abort, 0);
1728 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1729 atomic_set(&tgtp->xmt_abort_unsol, 0);
1730 atomic_set(&tgtp->xmt_abort_sol, 0);
1731 atomic_set(&tgtp->xmt_abort_rsp, 0);
1732 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1733 atomic_set(&tgtp->defer_ctx, 0);
1734 atomic_set(&tgtp->defer_fod, 0);
1735 atomic_set(&tgtp->defer_wqfull, 0);
1736 }
1737 return error;
1738 }
1739
1740 int
1741 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1742 {
1743 struct lpfc_vport *vport = phba->pport;
1744
1745 if (!phba->targetport)
1746 return 0;
1747
1748 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1749 "6007 Update NVMET port x%px did x%x\n",
1750 phba->targetport, vport->fc_myDID);
1751
1752 phba->targetport->port_id = vport->fc_myDID;
1753 return 0;
1754 }
1755
1756 /**
1757 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1758 * @phba: pointer to lpfc hba data structure.
1759 * @axri: pointer to the nvmet xri abort wcqe structure.
1760 *
1761 * This routine is invoked by the worker thread to process a SLI4 fast-path
1762 * NVMET aborted xri.
1763 **/
1764 void
1765 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1766 struct sli4_wcqe_xri_aborted *axri)
1767 {
1768 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1769 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1770 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1771 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1772 struct lpfc_nvmet_tgtport *tgtp;
1773 struct nvmefc_tgt_fcp_req *req = NULL;
1774 struct lpfc_nodelist *ndlp;
1775 unsigned long iflag = 0;
1776 int rrq_empty = 0;
1777 bool released = false;
1778
1779 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1780 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1781
1782 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1783 return;
1784
1785 if (phba->targetport) {
1786 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1787 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1788 }
1789
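	/* First search the contexts still marked XBUSY (waiting on an XRI
	 * abort): if the aborted XRI matches one, clear XBUSY and, when the
	 * transport has already released the context, recycle it now.
	 */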
1790 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1791 list_for_each_entry_safe(ctxp, next_ctxp,
1792 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1793 list) {
1794 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1795 continue;
1796
1797 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1798 iflag);
1799
1800 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1801 /* Check if we already received a free context call
1802 * and we have completed processing an abort situation.
1803 */
1804 if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1805 !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1806 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1807 list_del_init(&ctxp->list);
1808 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1809 released = true;
1810 }
1811 ctxp->flag &= ~LPFC_NVME_XBUSY;
1812 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1813
1814 spin_lock_irqsave(&phba->rrq_list_lock, iflag);
1815 rrq_empty = list_empty(&phba->active_rrq_list);
1816 spin_unlock_irqrestore(&phba->rrq_list_lock, iflag);
1817 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1818 if (ndlp &&
1819 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1820 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1821 lpfc_set_rrq_active(phba, ndlp,
1822 ctxp->ctxbuf->sglq->sli4_lxritag,
1823 rxid, 1);
1824 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1825 }
1826
1827 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1828 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1829 ctxp->oxid, ctxp->flag, released);
1830 if (released)
1831 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1832
1833 if (rrq_empty)
1834 lpfc_worker_wake_up(phba);
1835 return;
1836 }
1837 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1838 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1839 if (ctxp) {
1840 /*
1841 * Abort already done by FW, so BA_ACC sent.
1842 * However, the transport may be unaware.
1843 */
1844 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1845 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1846 "flag x%x oxid x%x rxid x%x\n",
1847 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1848 rxid);
1849
1850 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1851 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1852 ctxp->state = LPFC_NVME_STE_ABORT;
1853 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1854
1855 lpfc_nvmeio_data(phba,
1856 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1857 xri, raw_smp_processor_id(), 0);
1858
1859 req = &ctxp->hdlrctx.fcp_req;
1860 if (req)
1861 nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1862 }
1863 #endif
1864 }
1865
1866 int
1867 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1868 struct fc_frame_header *fc_hdr)
1869 {
1870 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1871 struct lpfc_hba *phba = vport->phba;
1872 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1873 struct nvmefc_tgt_fcp_req *rsp;
1874 uint32_t sid;
1875 uint16_t oxid, xri;
1876 unsigned long iflag = 0;
1877
1878 sid = sli4_sid_from_fc_hdr(fc_hdr);
1879 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1880
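	/* Resolve the exchange this ABTS refers to by OX_ID and S_ID, checking
	 * in order: contexts waiting on XRI aborts, receive buffers still
	 * waiting for a free context, and finally the active context list.
	 */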
1881 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1882 list_for_each_entry_safe(ctxp, next_ctxp,
1883 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1884 list) {
1885 if (ctxp->oxid != oxid || ctxp->sid != sid)
1886 continue;
1887
1888 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1889
1890 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1891 iflag);
1892 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1893 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1894 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1895
1896 lpfc_nvmeio_data(phba,
1897 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1898 xri, raw_smp_processor_id(), 0);
1899
1900 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1901 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1902
1903 rsp = &ctxp->hdlrctx.fcp_req;
1904 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1905
1906 /* Respond with BA_ACC accordingly */
1907 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1908 return 0;
1909 }
1910 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1911 /* check the wait list */
1912 if (phba->sli4_hba.nvmet_io_wait_cnt) {
1913 struct rqb_dmabuf *nvmebuf;
1914 struct fc_frame_header *fc_hdr_tmp;
1915 u32 sid_tmp;
1916 u16 oxid_tmp;
1917 bool found = false;
1918
1919 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1920
1921 /* match by oxid and s_id */
1922 list_for_each_entry(nvmebuf,
1923 &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1924 hbuf.list) {
1925 fc_hdr_tmp = (struct fc_frame_header *)
1926 (nvmebuf->hbuf.virt);
1927 oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1928 sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1929 if (oxid_tmp != oxid || sid_tmp != sid)
1930 continue;
1931
1932 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1933 "6321 NVMET Rcv ABTS oxid x%x from x%x "
1934 "is waiting for a ctxp\n",
1935 oxid, sid);
1936
1937 list_del_init(&nvmebuf->hbuf.list);
1938 phba->sli4_hba.nvmet_io_wait_cnt--;
1939 found = true;
1940 break;
1941 }
1942 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1943 iflag);
1944
1945 /* free buffer since already posted a new DMA buffer to RQ */
1946 if (found) {
1947 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1948 /* Respond with BA_ACC accordingly */
1949 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1950 return 0;
1951 }
1952 }
1953
1954 /* check active list */
1955 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1956 if (ctxp) {
1957 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1958
1959 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1960 ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1961 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1962
1963 lpfc_nvmeio_data(phba,
1964 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1965 xri, raw_smp_processor_id(), 0);
1966
1967 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1968 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1969 "flag x%x state x%x\n",
1970 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1971
1972 if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1973 /* Notify the transport */
1974 nvmet_fc_rcv_fcp_abort(phba->targetport,
1975 &ctxp->hdlrctx.fcp_req);
1976 } else {
1977 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1978 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1979 lpfc_nvmet_defer_release(phba, ctxp);
1980 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1981 }
1982 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1983 ctxp->oxid);
1984
1985 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1986 return 0;
1987 }
1988
1989 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1990 oxid, raw_smp_processor_id(), 1);
1991
1992 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1993 "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1994
1995 /* Respond with BA_RJT accordingly */
1996 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1997 #endif
1998 return 0;
1999 }
2000
2001 static void
2002 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2003 struct lpfc_async_xchg_ctx *ctxp)
2004 {
2005 struct lpfc_sli_ring *pring;
2006 struct lpfc_iocbq *nvmewqeq;
2007 struct lpfc_iocbq *next_nvmewqeq;
2008 unsigned long iflags;
2009 struct lpfc_wcqe_complete wcqe;
2010 struct lpfc_wcqe_complete *wcqep;
2011
2012 pring = wq->pring;
2013 wcqep = &wcqe;
2014
2015 /* Fake an ABORT error code back to cmpl routine */
2016 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2017 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2018 wcqep->parameter = IOERR_ABORT_REQUESTED;
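	/* Walk the WQ's wqfull_list and run each parked WQE through the
	 * normal FCP op completion handler with the faked
	 * LOCAL_REJECT/ABORT_REQUESTED status built above, either for one
	 * specific context or for all of them.
	 */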
2019
2020 spin_lock_irqsave(&pring->ring_lock, iflags);
2021 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2022 &wq->wqfull_list, list) {
2023 if (ctxp) {
2024 /* Checking for a specific IO to flush */
2025 if (nvmewqeq->context_un.axchg == ctxp) {
2026 list_del(&nvmewqeq->list);
2027 spin_unlock_irqrestore(&pring->ring_lock,
2028 iflags);
2029 memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
2030 sizeof(*wcqep));
2031 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2032 nvmewqeq);
2033 return;
2034 }
2035 continue;
2036 } else {
2037 /* Flush all IOs */
2038 list_del(&nvmewqeq->list);
2039 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2040 memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
2041 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
2042 spin_lock_irqsave(&pring->ring_lock, iflags);
2043 }
2044 }
2045 if (!ctxp)
2046 wq->q_flag &= ~HBA_NVMET_WQFULL;
2047 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2048 }
2049
2050 void
2051 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2052 struct lpfc_queue *wq)
2053 {
2054 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2055 struct lpfc_sli_ring *pring;
2056 struct lpfc_iocbq *nvmewqeq;
2057 struct lpfc_async_xchg_ctx *ctxp;
2058 unsigned long iflags;
2059 int rc;
2060
2061 /*
2062 * Some WQE slots are available, so try to re-issue anything
2063 * on the WQ wqfull_list.
2064 */
2065 pring = wq->pring;
2066 spin_lock_irqsave(&pring->ring_lock, iflags);
2067 while (!list_empty(&wq->wqfull_list)) {
2068 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2069 list);
2070 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2071 ctxp = nvmewqeq->context_un.axchg;
2072 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2073 spin_lock_irqsave(&pring->ring_lock, iflags);
2074 if (rc == -EBUSY) {
2075 /* WQ was full again, so put it back on the list */
2076 list_add(&nvmewqeq->list, &wq->wqfull_list);
2077 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2078 return;
2079 }
2080 if (rc == WQE_SUCCESS) {
2081 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2082 if (ctxp->ts_cmd_nvme) {
2083 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2084 ctxp->ts_status_wqput = ktime_get_ns();
2085 else
2086 ctxp->ts_data_wqput = ktime_get_ns();
2087 }
2088 #endif
2089 } else {
2090 WARN_ON(rc);
2091 }
2092 }
2093 wq->q_flag &= ~HBA_NVMET_WQFULL;
2094 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2095
2096 #endif
2097 }
2098
2099 void
2100 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2101 {
2102 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2103 struct lpfc_nvmet_tgtport *tgtp;
2104 struct lpfc_queue *wq;
2105 uint32_t qidx;
2106 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2107
2108 if (phba->nvmet_support == 0)
2109 return;
2110 if (phba->targetport) {
2111 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
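		/* Flush any WQEs still parked on each hardware queue's wqfull
		 * list, then unregister from the nvmet transport and wait (up
		 * to LPFC_NVMET_WAIT_TMO) for the transport's targetport
		 * delete callback to signal completion.
		 */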
2112 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2113 wq = phba->sli4_hba.hdwq[qidx].io_wq;
2114 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2115 }
2116 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2117 nvmet_fc_unregister_targetport(phba->targetport);
2118 if (!wait_for_completion_timeout(&tport_unreg_cmp,
2119 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2121 "6179 Unreg targetport x%px timeout "
2122 "reached.\n", phba->targetport);
2123 lpfc_nvmet_cleanup_io_context(phba);
2124 }
2125 phba->targetport = NULL;
2126 #endif
2127 }
2128
2129 /**
2130 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
2131 * @phba: pointer to lpfc hba data structure.
2132 * @axchg: pointer to exchange context for the NVME LS request
2133 *
2134  * This routine is used for processing an asynchronously received NVME LS
2135 * request. Any remaining validation is done and the LS is then forwarded
2136 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
2137 *
2138 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
2139 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
2140  * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated axchg.
2141 *
2142 * Returns 0 if LS was handled and delivered to the transport
2143 * Returns 1 if LS failed to be handled and should be dropped
2144 */
2145 int
2146 lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2147 struct lpfc_async_xchg_ctx *axchg)
2148 {
2149 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2150 struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2151 uint32_t *payload = axchg->payload;
2152 int rc;
2153
2154 atomic_inc(&tgtp->rcv_ls_req_in);
2155
2156 /*
2157 * Driver passes the ndlp as the hosthandle argument allowing
2158 	 * the transport to generate LS requests for any associations
2159 * that are created.
2160 */
2161 rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2162 axchg->payload, axchg->size);
2163
2164 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2165 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2166 "%08x %08x %08x\n", axchg->size, rc,
2167 *payload, *(payload+1), *(payload+2),
2168 *(payload+3), *(payload+4), *(payload+5));
2169
2170 if (!rc) {
2171 atomic_inc(&tgtp->rcv_ls_req_out);
2172 return 0;
2173 }
2174
2175 atomic_inc(&tgtp->rcv_ls_req_drop);
2176 #endif
2177 return 1;
2178 }
2179
2180 static void
2181 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2182 {
2183 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2184 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2185 struct lpfc_hba *phba = ctxp->phba;
2186 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2187 struct lpfc_nvmet_tgtport *tgtp;
2188 uint32_t *payload, qno;
2189 uint32_t rc;
2190 unsigned long iflags;
2191
2192 if (!nvmebuf) {
2193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2194 "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2195 "oxid: x%x flg: x%x state: x%x\n",
2196 ctxp->oxid, ctxp->flag, ctxp->state);
2197 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2198 lpfc_nvmet_defer_release(phba, ctxp);
2199 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2200 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2201 ctxp->oxid);
2202 return;
2203 }
2204
2205 if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2207 "6324 IO oxid x%x aborted\n",
2208 ctxp->oxid);
2209 return;
2210 }
2211
2212 payload = (uint32_t *)(nvmebuf->dbuf.virt);
2213 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2214 ctxp->flag |= LPFC_NVME_TNOTIFY;
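	/* Once TNOTIFY is set the transport is (about to be) aware of this
	 * IO, so a later ABTS must be relayed via nvmet_fc_rcv_fcp_abort()
	 * rather than simply releasing the context.
	 */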
2215 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2216 if (ctxp->ts_isr_cmd)
2217 ctxp->ts_cmd_nvme = ktime_get_ns();
2218 #endif
2219 /*
2220 * The calling sequence should be:
2221 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2222 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2223 	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info on
2224 	 * the NVME command / FC header is stored.
2225 * A buffer has already been reposted for this IO, so just free
2226 * the nvmebuf.
2227 */
2228 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2229 payload, ctxp->size);
2230 /* Process FCP command */
2231 if (rc == 0) {
2232 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2233 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2234 if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2235 (nvmebuf != ctxp->rqb_buffer)) {
2236 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2237 return;
2238 }
2239 ctxp->rqb_buffer = NULL;
2240 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2241 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2242 return;
2243 }
2244
2245 /* Processing of FCP command is deferred */
2246 if (rc == -EOVERFLOW) {
2247 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2248 "from %06x\n",
2249 ctxp->oxid, ctxp->size, ctxp->sid);
2250 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2251 atomic_inc(&tgtp->defer_fod);
2252 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2253 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2254 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2255 return;
2256 }
2257 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2258 /*
2259 * Post a replacement DMA buffer to RQ and defer
2260 * freeing rcv buffer till .defer_rcv callback
2261 */
2262 qno = nvmebuf->idx;
2263 lpfc_post_rq_buffer(
2264 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2265 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2266 return;
2267 }
2268 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2269 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2271 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2272 ctxp->oxid, rc,
2273 atomic_read(&tgtp->rcv_fcp_cmd_in),
2274 atomic_read(&tgtp->rcv_fcp_cmd_out),
2275 atomic_read(&tgtp->xmt_fcp_release));
2276 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2277 ctxp->oxid, ctxp->size, ctxp->sid);
2278 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2279 lpfc_nvmet_defer_release(phba, ctxp);
2280 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2281 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2282 #endif
2283 }
2284
2285 static void
2286 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2287 {
2288 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2289 struct lpfc_nvmet_ctxbuf *ctx_buf =
2290 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2291
2292 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2293 #endif
2294 }
2295
2296 static struct lpfc_nvmet_ctxbuf *
2297 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2298 struct lpfc_nvmet_ctx_info *current_infop)
2299 {
2300 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2301 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2302 struct lpfc_nvmet_ctx_info *get_infop;
2303 int i;
2304
2305 /*
2306 	 * The current_infop for the MRQ an NVME command IU was received
2307 	 * on is empty. Our goal is to replenish this MRQ's context
2308 	 * list from another CPU's list.
2309 	 *
2310 	 * First we need to pick a context list to start looking on.
2311 	 * nvmet_ctx_start_cpu points to the CPU that had contexts available
2312 	 * the last time we needed to replenish this one, while
2313 	 * nvmet_ctx_next_cpu is just the next sequential CPU for this MRQ.
2314 */
2315 if (current_infop->nvmet_ctx_start_cpu)
2316 get_infop = current_infop->nvmet_ctx_start_cpu;
2317 else
2318 get_infop = current_infop->nvmet_ctx_next_cpu;
2319
2320 for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2321 if (get_infop == current_infop) {
2322 get_infop = get_infop->nvmet_ctx_next_cpu;
2323 continue;
2324 }
2325 spin_lock(&get_infop->nvmet_ctx_list_lock);
2326
2327 /* Just take the entire context list, if there are any */
2328 if (get_infop->nvmet_ctx_list_cnt) {
2329 list_splice_init(&get_infop->nvmet_ctx_list,
2330 					 &current_infop->nvmet_ctx_list);
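			/* Credit one less than the donor's count: the extra
			 * context is popped off below and handed straight
			 * back to the caller.
			 */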
2331 current_infop->nvmet_ctx_list_cnt =
2332 get_infop->nvmet_ctx_list_cnt - 1;
2333 get_infop->nvmet_ctx_list_cnt = 0;
2334 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2335
2336 current_infop->nvmet_ctx_start_cpu = get_infop;
2337 			list_remove_head(&current_infop->nvmet_ctx_list,
2338 ctx_buf, struct lpfc_nvmet_ctxbuf,
2339 list);
2340 return ctx_buf;
2341 }
2342
2343 /* Otherwise, move on to the next CPU for this MRQ */
2344 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2345 get_infop = get_infop->nvmet_ctx_next_cpu;
2346 }
2347
2348 #endif
2349 /* Nothing found, all contexts for the MRQ are in-flight */
2350 return NULL;
2351 }
2352
2353 /**
2354 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2355 * @phba: pointer to lpfc hba data structure.
2356 * @idx: relative index of MRQ vector
2357 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2358 * @isr_timestamp: in jiffies.
2359 * @cqflag: cq processing information regarding workload.
2360 *
2361  * This routine processes an unsolicited NVME FCP command received on an
2362  * MRQ. It pulls a free exchange context from the per-CPU context list for
2363  * this MRQ (replenishing from another CPU's list if necessary), initializes
2364  * the context from the received FC header, and then either processes the
2365  * command immediately or defers it to a work queue, depending on the CQ
2366  * processing load indicated by @cqflag.
2367 **/
2368 static void
2369 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2370 uint32_t idx,
2371 struct rqb_dmabuf *nvmebuf,
2372 uint64_t isr_timestamp,
2373 uint8_t cqflag)
2374 {
2375 struct lpfc_async_xchg_ctx *ctxp;
2376 struct lpfc_nvmet_tgtport *tgtp;
2377 struct fc_frame_header *fc_hdr;
2378 struct lpfc_nvmet_ctxbuf *ctx_buf;
2379 struct lpfc_nvmet_ctx_info *current_infop;
2380 uint32_t size, oxid, sid, qno;
2381 unsigned long iflag;
2382 int current_cpu;
2383
2384 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2385 return;
2386
2387 ctx_buf = NULL;
2388 if (!nvmebuf || !phba->targetport) {
2389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2390 "6157 NVMET FCP Drop IO\n");
2391 if (nvmebuf)
2392 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2393 return;
2394 }
2395
2396 /*
2397 * Get a pointer to the context list for this MRQ based on
2398 * the CPU this MRQ IRQ is associated with. If the CPU association
2399 * changes from our initial assumption, the context list could
2400 * be empty, thus it would need to be replenished with the
2401 * context list from another CPU for this MRQ.
2402 */
2403 current_cpu = raw_smp_processor_id();
2404 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2405 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2406 if (current_infop->nvmet_ctx_list_cnt) {
2407 		list_remove_head(&current_infop->nvmet_ctx_list,
2408 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2409 current_infop->nvmet_ctx_list_cnt--;
2410 } else {
2411 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2412 }
2413 	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2414
2415 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2416 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2417 size = nvmebuf->bytes_recv;
2418
2419 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2420 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2421 this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2422 if (idx != current_cpu)
2423 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2424 "6703 CPU Check rcv: "
2425 "cpu %d expect %d\n",
2426 current_cpu, idx);
2427 }
2428 #endif
2429
2430 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2431 oxid, size, raw_smp_processor_id());
2432
2433 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2434
2435 if (!ctx_buf) {
2436 /* Queue this NVME IO to process later */
2437 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2438 list_add_tail(&nvmebuf->hbuf.list,
2439 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2440 phba->sli4_hba.nvmet_io_wait_cnt++;
2441 phba->sli4_hba.nvmet_io_wait_total++;
2442 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2443 iflag);
2444
2445 /* Post a brand new DMA buffer to RQ */
2446 qno = nvmebuf->idx;
2447 lpfc_post_rq_buffer(
2448 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2449 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2450
2451 atomic_inc(&tgtp->defer_ctx);
2452 return;
2453 }
2454
2455 sid = sli4_sid_from_fc_hdr(fc_hdr);
2456
2457 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2458 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2459 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2460 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
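	/* The exchange now sits on the driver-wide active context list until
	 * it completes or is aborted; flag it in the log if the context we
	 * pulled was not actually free.
	 */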
2461 if (ctxp->state != LPFC_NVME_STE_FREE) {
2462 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2463 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2464 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2465 }
2466 ctxp->wqeq = NULL;
2467 ctxp->offset = 0;
2468 ctxp->phba = phba;
2469 ctxp->size = size;
2470 ctxp->oxid = oxid;
2471 ctxp->sid = sid;
2472 ctxp->idx = idx;
2473 ctxp->state = LPFC_NVME_STE_RCV;
2474 ctxp->entry_cnt = 1;
2475 ctxp->flag = 0;
2476 ctxp->ctxbuf = ctx_buf;
2477 ctxp->rqb_buffer = (void *)nvmebuf;
2478 ctxp->hdwq = NULL;
2479 spin_lock_init(&ctxp->ctxlock);
2480
2481 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2482 if (isr_timestamp)
2483 ctxp->ts_isr_cmd = isr_timestamp;
2484 ctxp->ts_cmd_nvme = 0;
2485 ctxp->ts_nvme_data = 0;
2486 ctxp->ts_data_wqput = 0;
2487 ctxp->ts_isr_data = 0;
2488 ctxp->ts_data_nvme = 0;
2489 ctxp->ts_nvme_status = 0;
2490 ctxp->ts_status_wqput = 0;
2491 ctxp->ts_isr_status = 0;
2492 ctxp->ts_status_nvme = 0;
2493 #endif
2494
2495 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2496 /* check for cq processing load */
2497 if (!cqflag) {
2498 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2499 return;
2500 }
2501
2502 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2503 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2504 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2505 "6325 Unable to queue work for oxid x%x. "
2506 "FCP Drop IO [x%x x%x x%x]\n",
2507 ctxp->oxid,
2508 atomic_read(&tgtp->rcv_fcp_cmd_in),
2509 atomic_read(&tgtp->rcv_fcp_cmd_out),
2510 atomic_read(&tgtp->xmt_fcp_release));
2511
2512 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2513 lpfc_nvmet_defer_release(phba, ctxp);
2514 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2515 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2516 }
2517 }
2518
2519 /**
2520 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2521 * @phba: pointer to lpfc hba data structure.
2522 * @idx: relative index of MRQ vector
2523 * @nvmebuf: pointer to received nvme data structure.
2524 * @isr_timestamp: in jiffies.
2525 * @cqflag: cq processing information regarding workload.
2526 *
2527 * This routine is used to process an unsolicited event received from a SLI
2528 * (Service Level Interface) ring. The actual processing of the data buffer
2529 * associated with the unsolicited event is done by invoking the routine
2530  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
2531  * the SLI RQ on which the unsolicited event was received.
2532 **/
2533 void
2534 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2535 uint32_t idx,
2536 struct rqb_dmabuf *nvmebuf,
2537 uint64_t isr_timestamp,
2538 uint8_t cqflag)
2539 {
2540 if (!nvmebuf) {
2541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2542 "3167 NVMET FCP Drop IO\n");
2543 return;
2544 }
2545 if (phba->nvmet_support == 0) {
2546 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2547 return;
2548 }
2549 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2550 }
2551
2552 /**
2553 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2554 * @phba: pointer to a host N_Port data structure.
2555 * @ctxp: Context info for NVME LS Request
2556 * @rspbuf: DMA buffer of NVME command.
2557 * @rspsize: size of the NVME command.
2558 *
2559  * This routine allocates a lpfc WQE data structure from the driver
2560  * WQE free-list and prepares it, with the parameters passed in, to
2561  * transmit the NVME LS response for the exchange described by @ctxp.
2562  * It fills in the Buffer Descriptor Entry (BDE) for the response
2563  * payload at @rspbuf of length @rspsize and sets up the remaining
2564  * XMIT_SEQUENCE64 WQE fields. The reference count on the ndlp is
2565  * incremented by 1 and the ndlp pointer is stored in the WQE data
2566  * structure so the command's completion handler can access it later.
2572 *
2573 * Return code
2574 * Pointer to the newly allocated/prepared nvme wqe data structure
2575 * NULL - when nvme wqe data structure allocation/preparation failed
2576 **/
2577 static struct lpfc_iocbq *
2578 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2579 struct lpfc_async_xchg_ctx *ctxp,
2580 dma_addr_t rspbuf, uint16_t rspsize)
2581 {
2582 struct lpfc_nodelist *ndlp;
2583 struct lpfc_iocbq *nvmewqe;
2584 union lpfc_wqe128 *wqe;
2585
2586 if (!lpfc_is_link_up(phba)) {
2587 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2588 "6104 NVMET prep LS wqe: link err: "
2589 "NPORT x%x oxid:x%x ste %d\n",
2590 ctxp->sid, ctxp->oxid, ctxp->state);
2591 return NULL;
2592 }
2593
2594 /* Allocate buffer for command wqe */
2595 nvmewqe = lpfc_sli_get_iocbq(phba);
2596 if (nvmewqe == NULL) {
2597 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2598 "6105 NVMET prep LS wqe: No WQE: "
2599 "NPORT x%x oxid x%x ste %d\n",
2600 ctxp->sid, ctxp->oxid, ctxp->state);
2601 return NULL;
2602 }
2603
2604 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2605 if (!ndlp ||
2606 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2607 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2608 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2609 "6106 NVMET prep LS wqe: No ndlp: "
2610 "NPORT x%x oxid x%x ste %d\n",
2611 ctxp->sid, ctxp->oxid, ctxp->state);
2612 goto nvme_wqe_free_wqeq_exit;
2613 }
2614 ctxp->wqeq = nvmewqe;
2615
2616 /* prevent preparing wqe with NULL ndlp reference */
2617 nvmewqe->ndlp = lpfc_nlp_get(ndlp);
2618 if (!nvmewqe->ndlp)
2619 goto nvme_wqe_free_wqeq_exit;
2620 nvmewqe->context_un.axchg = ctxp;
2621
2622 wqe = &nvmewqe->wqe;
2623 memset(wqe, 0, sizeof(union lpfc_wqe));
2624
2625 /* Words 0 - 2 */
2626 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2627 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2628 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2629 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2630
2631 /* Word 3 */
2632
2633 /* Word 4 */
2634
2635 /* Word 5 */
2636 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2637 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2638 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2639 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2640 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2641
2642 /* Word 6 */
2643 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2644 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2645 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2646
2647 /* Word 7 */
2648 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2649 CMD_XMIT_SEQUENCE64_WQE);
2650 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2651 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2652 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2653
2654 /* Word 8 */
2655 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2656
2657 /* Word 9 */
2658 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2659 /* Needs to be set by caller */
2660 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2661
2662 /* Word 10 */
2663 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2664 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2665 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2666 LPFC_WQE_LENLOC_WORD12);
2667 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2668
2669 /* Word 11 */
2670 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2671 LPFC_WQE_CQ_ID_DEFAULT);
2672 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2673 OTHER_COMMAND);
2674
2675 /* Word 12 */
2676 wqe->xmit_sequence.xmit_len = rspsize;
2677
2678 nvmewqe->retry = 1;
2679 nvmewqe->vport = phba->pport;
2680 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2681 nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
2682
2683 /* Xmit NVMET response to remote NPORT <did> */
2684 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2685 "6039 Xmit NVMET LS response to remote "
2686 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2687 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2688 rspsize);
2689 return nvmewqe;
2690
2691 nvme_wqe_free_wqeq_exit:
2692 nvmewqe->context_un.axchg = NULL;
2693 nvmewqe->ndlp = NULL;
2694 nvmewqe->bpl_dmabuf = NULL;
2695 lpfc_sli_release_iocbq(phba, nvmewqe);
2696 return NULL;
2697 }
2698
2699
2700 static struct lpfc_iocbq *
2701 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2702 struct lpfc_async_xchg_ctx *ctxp)
2703 {
2704 struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2705 struct lpfc_nvmet_tgtport *tgtp;
2706 struct sli4_sge *sgl;
2707 struct lpfc_nodelist *ndlp;
2708 struct lpfc_iocbq *nvmewqe;
2709 struct scatterlist *sgel;
2710 union lpfc_wqe128 *wqe;
2711 struct ulp_bde64 *bde;
2712 dma_addr_t physaddr;
2713 int i, cnt, nsegs;
2714 bool use_pbde = false;
2715 int xc = 1;
2716
2717 if (!lpfc_is_link_up(phba)) {
2718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2719 "6107 NVMET prep FCP wqe: link err:"
2720 "NPORT x%x oxid x%x ste %d\n",
2721 ctxp->sid, ctxp->oxid, ctxp->state);
2722 return NULL;
2723 }
2724
2725 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2726 if (!ndlp ||
2727 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2728 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2729 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2730 "6108 NVMET prep FCP wqe: no ndlp: "
2731 "NPORT x%x oxid x%x ste %d\n",
2732 ctxp->sid, ctxp->oxid, ctxp->state);
2733 return NULL;
2734 }
2735
2736 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2737 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2738 "6109 NVMET prep FCP wqe: seg cnt err: "
2739 "NPORT x%x oxid x%x ste %d cnt %d\n",
2740 ctxp->sid, ctxp->oxid, ctxp->state,
2741 phba->cfg_nvme_seg_cnt);
2742 return NULL;
2743 }
2744 nsegs = rsp->sg_cnt;
2745
2746 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2747 nvmewqe = ctxp->wqeq;
2748 if (nvmewqe == NULL) {
2749 /* Allocate buffer for command wqe */
2750 nvmewqe = ctxp->ctxbuf->iocbq;
2751 if (nvmewqe == NULL) {
2752 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2753 "6110 NVMET prep FCP wqe: No "
2754 "WQE: NPORT x%x oxid x%x ste %d\n",
2755 ctxp->sid, ctxp->oxid, ctxp->state);
2756 return NULL;
2757 }
2758 ctxp->wqeq = nvmewqe;
2759 xc = 0; /* create new XRI */
2760 nvmewqe->sli4_lxritag = NO_XRI;
2761 nvmewqe->sli4_xritag = NO_XRI;
2762 }
2763
2764 /* Sanity check */
2765 if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2766 (ctxp->entry_cnt == 1)) ||
2767 (ctxp->state == LPFC_NVME_STE_DATA)) {
2768 wqe = &nvmewqe->wqe;
2769 } else {
2770 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2771 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2772 ctxp->state, ctxp->entry_cnt);
2773 return NULL;
2774 }
2775
2776 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2777 switch (rsp->op) {
2778 case NVMET_FCOP_READDATA:
2779 case NVMET_FCOP_READDATA_RSP:
2780 /* From the tsend template, initialize words 7 - 11 */
2781 memcpy(&wqe->words[7],
2782 &lpfc_tsend_cmd_template.words[7],
2783 sizeof(uint32_t) * 5);
2784
2785 /* Words 0 - 2 : The first sg segment */
2786 sgel = &rsp->sg[0];
2787 physaddr = sg_dma_address(sgel);
2788 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2789 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2790 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2791 wqe->fcp_tsend.bde.addrHigh =
2792 cpu_to_le32(putPaddrHigh(physaddr));
2793
2794 /* Word 3 */
2795 wqe->fcp_tsend.payload_offset_len = 0;
2796
2797 /* Word 4 */
2798 wqe->fcp_tsend.relative_offset = ctxp->offset;
2799
2800 /* Word 5 */
2801 wqe->fcp_tsend.reserved = 0;
2802
2803 /* Word 6 */
2804 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2805 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2806 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2807 nvmewqe->sli4_xritag);
2808
2809 /* Word 7 - set ar later */
2810
2811 /* Word 8 */
2812 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2813
2814 /* Word 9 */
2815 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2816 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2817
2818 /* Word 10 - set wqes later, in template xc=1 */
2819 if (!xc)
2820 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2821
2822 /* Word 12 */
2823 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2824
2825 /* Setup 2 SKIP SGEs */
2826 sgl->addr_hi = 0;
2827 sgl->addr_lo = 0;
2828 sgl->word2 = 0;
2829 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2830 sgl->word2 = cpu_to_le32(sgl->word2);
2831 sgl->sge_len = 0;
2832 sgl++;
2833 sgl->addr_hi = 0;
2834 sgl->addr_lo = 0;
2835 sgl->word2 = 0;
2836 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2837 sgl->word2 = cpu_to_le32(sgl->word2);
2838 sgl->sge_len = 0;
2839 sgl++;
2840 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2841 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2842
2843 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2844
2845 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2846 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2847 bf_set(wqe_sup,
2848 &wqe->fcp_tsend.wqe_com, 1);
2849 } else {
2850 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2851 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2852 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2853 ((rsp->rsplen >> 2) - 1));
2854 memcpy(&wqe->words[16], rsp->rspaddr,
2855 rsp->rsplen);
2856 }
2857 } else {
2858 atomic_inc(&tgtp->xmt_fcp_read);
2859
2860 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2861 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2862 }
2863 break;
2864
2865 case NVMET_FCOP_WRITEDATA:
2866 /* From the treceive template, initialize words 3 - 11 */
2867 memcpy(&wqe->words[3],
2868 &lpfc_treceive_cmd_template.words[3],
2869 sizeof(uint32_t) * 9);
2870
2871 /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2872 wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2873 wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2874 wqe->fcp_treceive.bde.addrLow = 0;
2875 wqe->fcp_treceive.bde.addrHigh = 0;
2876
2877 /* Word 4 */
2878 wqe->fcp_treceive.relative_offset = ctxp->offset;
2879
2880 /* Word 6 */
2881 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2882 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2883 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2884 nvmewqe->sli4_xritag);
2885
2886 /* Word 7 */
2887
2888 /* Word 8 */
2889 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2890
2891 /* Word 9 */
2892 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2893 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2894
2895 /* Word 10 - in template xc=1 */
2896 if (!xc)
2897 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2898
2899 /* Word 11 - check for pbde */
2900 if (nsegs == 1 && phba->cfg_enable_pbde) {
2901 use_pbde = true;
2902 /* Word 11 - PBDE bit already preset by template */
2903 } else {
2904 /* Overwrite default template setting */
2905 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2906 }
2907
2908 /* Word 12 */
2909 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2910
2911 /* Setup 2 SKIP SGEs */
2912 sgl->addr_hi = 0;
2913 sgl->addr_lo = 0;
2914 sgl->word2 = 0;
2915 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2916 sgl->word2 = cpu_to_le32(sgl->word2);
2917 sgl->sge_len = 0;
2918 sgl++;
2919 sgl->addr_hi = 0;
2920 sgl->addr_lo = 0;
2921 sgl->word2 = 0;
2922 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2923 sgl->word2 = cpu_to_le32(sgl->word2);
2924 sgl->sge_len = 0;
2925 sgl++;
2926 atomic_inc(&tgtp->xmt_fcp_write);
2927 break;
2928
2929 case NVMET_FCOP_RSP:
2930 /* From the treceive template, initialize words 4 - 11 */
2931 memcpy(&wqe->words[4],
2932 &lpfc_trsp_cmd_template.words[4],
2933 sizeof(uint32_t) * 8);
2934
2935 /* Words 0 - 2 */
2936 physaddr = rsp->rspdma;
2937 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2938 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2939 wqe->fcp_trsp.bde.addrLow =
2940 cpu_to_le32(putPaddrLow(physaddr));
2941 wqe->fcp_trsp.bde.addrHigh =
2942 cpu_to_le32(putPaddrHigh(physaddr));
2943
2944 /* Word 3 */
2945 wqe->fcp_trsp.response_len = rsp->rsplen;
2946
2947 /* Word 6 */
2948 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2949 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2950 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2951 nvmewqe->sli4_xritag);
2952
2953 /* Word 7 */
2954
2955 /* Word 8 */
2956 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2957
2958 /* Word 9 */
2959 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2960 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2961
2962 /* Word 10 */
2963 if (xc)
2964 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2965
2966 /* Word 11 */
2967 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2968 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2969 /* Bad response - embed it */
2970 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2971 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2972 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2973 ((rsp->rsplen >> 2) - 1));
2974 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2975 }
2976
2977 /* Word 12 */
2978 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2979
2980 /* Use rspbuf, NOT sg list */
2981 nsegs = 0;
2982 sgl->word2 = 0;
2983 atomic_inc(&tgtp->xmt_fcp_rsp);
2984 break;
2985
2986 default:
2987 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2988 "6064 Unknown Rsp Op %d\n",
2989 rsp->op);
2990 return NULL;
2991 }
2992
2993 nvmewqe->retry = 1;
2994 nvmewqe->vport = phba->pport;
2995 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2996 nvmewqe->ndlp = ndlp;
2997
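	/* Build a data SGE for each scatterlist segment behind the two SKIP
	 * entries set up above, marking the last SGE and advancing the
	 * running relative offset for this exchange.
	 */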
2998 for_each_sg(rsp->sg, sgel, nsegs, i) {
2999 physaddr = sg_dma_address(sgel);
3000 cnt = sg_dma_len(sgel);
3001 sgl->addr_hi = putPaddrHigh(physaddr);
3002 sgl->addr_lo = putPaddrLow(physaddr);
3003 sgl->word2 = 0;
3004 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3005 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3006 if ((i+1) == rsp->sg_cnt)
3007 bf_set(lpfc_sli4_sge_last, sgl, 1);
3008 sgl->word2 = cpu_to_le32(sgl->word2);
3009 sgl->sge_len = cpu_to_le32(cnt);
3010 sgl++;
3011 ctxp->offset += cnt;
3012 }
3013
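	/* Words 13-15 may carry a PBDE duplicating the lone data SGE; it is
	 * only used for single-segment transfers with cfg_enable_pbde set
	 * (as decided above), otherwise the words are cleared.
	 */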
3014 bde = (struct ulp_bde64 *)&wqe->words[13];
3015 if (use_pbde) {
3016 /* decrement sgl ptr backwards once to first data sge */
3017 sgl--;
3018
3019 /* Words 13-15 (PBDE) */
3020 bde->addrLow = sgl->addr_lo;
3021 bde->addrHigh = sgl->addr_hi;
3022 bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
3023 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3024 bde->tus.w = cpu_to_le32(bde->tus.w);
3025 } else {
3026 memset(bde, 0, sizeof(struct ulp_bde64));
3027 }
3028 ctxp->state = LPFC_NVME_STE_DATA;
3029 ctxp->entry_cnt++;
3030 return nvmewqe;
3031 }
3032
3033 /**
3034 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
3035 * @phba: Pointer to HBA context object.
3036 * @cmdwqe: Pointer to driver command WQE object.
3037 * @rspwqe: Pointer to driver response WQE object.
3038 *
3039  * The function is called from the SLI ring event handler with no
3040  * lock held. It is the completion handler for an NVME ABTS for FCP cmds
3041  * and frees the memory resources used for the NVME command.
3042 **/
3043 static void
3044 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3045 struct lpfc_iocbq *rspwqe)
3046 {
3047 struct lpfc_async_xchg_ctx *ctxp;
3048 struct lpfc_nvmet_tgtport *tgtp;
3049 uint32_t result;
3050 unsigned long flags;
3051 bool released = false;
3052 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3053
3054 ctxp = cmdwqe->context_un.axchg;
3055 result = wcqe->parameter;
3056
3057 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3058 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3059 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3060
3061 spin_lock_irqsave(&ctxp->ctxlock, flags);
3062 ctxp->state = LPFC_NVME_STE_DONE;
3063
3064 /* Check if we already received a free context call
3065 * and we have completed processing an abort situation.
3066 */
3067 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3068 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3069 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3070 list_del_init(&ctxp->list);
3071 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3072 released = true;
3073 }
3074 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3075 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3076 atomic_inc(&tgtp->xmt_abort_rsp);
3077
3078 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3079 "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
3080 "WCQE: %08x %08x %08x %08x\n",
3081 ctxp->oxid, ctxp->flag, released,
3082 wcqe->word0, wcqe->total_data_placed,
3083 result, wcqe->word3);
3084
3085 cmdwqe->rsp_dmabuf = NULL;
3086 cmdwqe->bpl_dmabuf = NULL;
3087 /*
3088 * if transport has released ctx, then can reuse it. Otherwise,
3089 * will be recycled by transport release call.
3090 */
3091 if (released)
3092 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3093
3094 /* This is the iocbq for the abort, not the command */
3095 lpfc_sli_release_iocbq(phba, cmdwqe);
3096
3097 /* Since iaab/iaar are NOT set, there is no work left.
3098 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3099 * should have been called already.
3100 */
3101 }
3102
3103 /**
3104 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3105 * @phba: Pointer to HBA context object.
3106 * @cmdwqe: Pointer to driver command WQE object.
3107 * @rspwqe: Pointer to driver response WQE object.
3108 *
3109  * The function is called from the SLI ring event handler with no
3110  * lock held. It is the completion handler for an NVME ABTS for FCP cmds
3111  * and frees the memory resources used for the NVME command.
3112 **/
3113 static void
3114 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3115 struct lpfc_iocbq *rspwqe)
3116 {
3117 struct lpfc_async_xchg_ctx *ctxp;
3118 struct lpfc_nvmet_tgtport *tgtp;
3119 unsigned long flags;
3120 uint32_t result;
3121 bool released = false;
3122 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3123
3124 ctxp = cmdwqe->context_un.axchg;
3125 result = wcqe->parameter;
3126
3127 if (!ctxp) {
3128 		/* if context is clear, related io already completed */
3129 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3130 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3131 wcqe->word0, wcqe->total_data_placed,
3132 result, wcqe->word3);
3133 return;
3134 }
3135
3136 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3137 spin_lock_irqsave(&ctxp->ctxlock, flags);
3138 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3139 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3140
3141 /* Sanity check */
3142 if (ctxp->state != LPFC_NVME_STE_ABORT) {
3143 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3144 "6112 ABTS Wrong state:%d oxid x%x\n",
3145 ctxp->state, ctxp->oxid);
3146 }
3147
3148 /* Check if we already received a free context call
3149 * and we have completed processing an abort situation.
3150 */
3151 ctxp->state = LPFC_NVME_STE_DONE;
3152 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3153 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3154 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3155 list_del_init(&ctxp->list);
3156 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3157 released = true;
3158 }
3159 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3160 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3161 atomic_inc(&tgtp->xmt_abort_rsp);
3162
3163 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3164 "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3165 "WCQE: %08x %08x %08x %08x\n",
3166 ctxp->oxid, ctxp->flag, released,
3167 wcqe->word0, wcqe->total_data_placed,
3168 result, wcqe->word3);
3169
3170 cmdwqe->rsp_dmabuf = NULL;
3171 cmdwqe->bpl_dmabuf = NULL;
3172 /*
3173 * if transport has released ctx, then can reuse it. Otherwise,
3174 * will be recycled by transport release call.
3175 */
3176 if (released)
3177 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3178
3179 /* Since iaab/iaar are NOT set, there is no work left.
3180 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3181 * should have been called already.
3182 */
3183 }
3184
3185 /**
3186 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3187 * @phba: Pointer to HBA context object.
3188 * @cmdwqe: Pointer to driver command WQE object.
3189 * @rspwqe: Pointer to driver response WQE object.
3190 *
3191  * The function is called from the SLI ring event handler with no
3192  * lock held. It is the completion handler for an NVME ABTS for LS cmds
3193  * and frees the memory resources used for the NVME command.
3194 **/
3195 static void
3196 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3197 struct lpfc_iocbq *rspwqe)
3198 {
3199 struct lpfc_async_xchg_ctx *ctxp;
3200 struct lpfc_nvmet_tgtport *tgtp;
3201 uint32_t result;
3202 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3203
3204 ctxp = cmdwqe->context_un.axchg;
3205 result = wcqe->parameter;
3206
3207 if (phba->nvmet_support) {
3208 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3209 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3210 }
3211
3212 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3213 "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3214 ctxp, wcqe->word0, wcqe->total_data_placed,
3215 result, wcqe->word3);
3216
3217 if (!ctxp) {
3218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3219 "6415 NVMET LS Abort No ctx: WCQE: "
3220 "%08x %08x %08x %08x\n",
3221 wcqe->word0, wcqe->total_data_placed,
3222 result, wcqe->word3);
3223
3224 lpfc_sli_release_iocbq(phba, cmdwqe);
3225 return;
3226 }
3227
3228 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3229 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3230 "6416 NVMET LS abort cmpl state mismatch: "
3231 "oxid x%x: %d %d\n",
3232 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3233 }
3234
3235 cmdwqe->rsp_dmabuf = NULL;
3236 cmdwqe->bpl_dmabuf = NULL;
3237 lpfc_sli_release_iocbq(phba, cmdwqe);
3238 kfree(ctxp);
3239 }
3240
3241 static int
3242 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3243 struct lpfc_async_xchg_ctx *ctxp,
3244 uint32_t sid, uint16_t xri)
3245 {
3246 struct lpfc_nvmet_tgtport *tgtp = NULL;
3247 struct lpfc_iocbq *abts_wqeq;
3248 union lpfc_wqe128 *wqe_abts;
3249 struct lpfc_nodelist *ndlp;
3250
3251 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3252 "6067 ABTS: sid %x xri x%x/x%x\n",
3253 sid, xri, ctxp->wqeq->sli4_xritag);
3254
3255 if (phba->nvmet_support && phba->targetport)
3256 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3257
3258 ndlp = lpfc_findnode_did(phba->pport, sid);
3259 if (!ndlp ||
3260 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3261 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3262 if (tgtp)
3263 atomic_inc(&tgtp->xmt_abort_rsp_error);
3264 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3265 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3266 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3267
3268 /* No failure to an ABTS request. */
3269 return 0;
3270 }
3271
3272 abts_wqeq = ctxp->wqeq;
3273 wqe_abts = &abts_wqeq->wqe;
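	/* The unsolicited-path abort is sent as an XMIT_SEQUENCE64 WQE
	 * carrying a BLS ABTS (R_CTL = BA_ABTS, TYPE = BLS) back to the
	 * originator of the exchange.
	 */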
3274
3275 /*
3276 * Since we zero the whole WQE, we need to ensure we set the WQE fields
3277 * that were initialized in lpfc_sli4_nvmet_alloc.
3278 */
3279 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3280
3281 /* Word 5 */
3282 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3283 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3284 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3285 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3286 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3287
3288 /* Word 6 */
3289 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3290 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3291 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3292 abts_wqeq->sli4_xritag);
3293
3294 /* Word 7 */
3295 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3296 CMD_XMIT_SEQUENCE64_WQE);
3297 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3298 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3299 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3300
3301 /* Word 8 */
3302 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3303
3304 /* Word 9 */
3305 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3306 /* Needs to be set by caller */
3307 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3308
3309 /* Word 10 */
3310 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3311 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3312 LPFC_WQE_LENLOC_WORD12);
3313 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3314 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3315
3316 /* Word 11 */
3317 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3318 LPFC_WQE_CQ_ID_DEFAULT);
3319 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3320 OTHER_COMMAND);
3321
3322 abts_wqeq->vport = phba->pport;
3323 abts_wqeq->ndlp = ndlp;
3324 abts_wqeq->context_un.axchg = ctxp;
3325 abts_wqeq->bpl_dmabuf = NULL;
3326 abts_wqeq->num_bdes = 0;
3327 /* hba_wqidx should already be set up from the command we are aborting */
3328 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3329 abts_wqeq->iocb.ulpLe = 1;
3330
3331 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3332 "6069 Issue ABTS to xri x%x reqtag x%x\n",
3333 xri, abts_wqeq->iotag);
3334 return 1;
3335 }
3336
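/**
 * lpfc_nvmet_sol_fcp_issue_abort - abort a solicited NVMET FCP exchange
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the exchange context to be aborted
 * @sid: FC_ID of the remote (initiator) port
 * @xri: exchange id of the exchange being aborted
 *
 * Allocates a separate abort iocbq, builds an abort WQE against the
 * outstanding command's XRI and submits it on the same hardware queue.
 * Returns 0 when the abort is issued or intentionally dropped, 1 when
 * the WQE submission itself fails.
 **/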
3337 static int
3338 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3339 struct lpfc_async_xchg_ctx *ctxp,
3340 uint32_t sid, uint16_t xri)
3341 {
3342 struct lpfc_nvmet_tgtport *tgtp;
3343 struct lpfc_iocbq *abts_wqeq;
3344 struct lpfc_nodelist *ndlp;
3345 unsigned long flags;
3346 bool ia;
3347 int rc;
3348
3349 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3350 if (!ctxp->wqeq) {
3351 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3352 ctxp->wqeq->hba_wqidx = 0;
3353 }
3354
3355 ndlp = lpfc_findnode_did(phba->pport, sid);
3356 if (!ndlp ||
3357 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3358 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3359 atomic_inc(&tgtp->xmt_abort_rsp_error);
3360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3361 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3362 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3363
3364 /* Never fail an ABTS request back to the caller. */
3365 spin_lock_irqsave(&ctxp->ctxlock, flags);
3366 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3367 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3368 return 0;
3369 }
3370
3371 /* Issue ABTS for this WQE based on iotag */
3372 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3373 spin_lock_irqsave(&ctxp->ctxlock, flags);
3374 if (!ctxp->abort_wqeq) {
3375 atomic_inc(&tgtp->xmt_abort_rsp_error);
3376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3377 "6161 ABORT failed: No wqeqs: "
3378 "xri: x%x\n", ctxp->oxid);
3379 /* Never fail an ABTS request back to the caller. */
3380 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3381 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3382 return 0;
3383 }
3384 abts_wqeq = ctxp->abort_wqeq;
3385 ctxp->state = LPFC_NVME_STE_ABORT;
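/* ia reflects whether an ABTS was already received from the initiator
 * for this exchange; it is passed through to the abort WQE below.
 */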
3386 ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
3387 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3388
3389 /* Announce the abort request we are about to submit. */
3390 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3391 "6162 ABORT Request to rport DID x%06x "
3392 "for xri x%x x%x\n",
3393 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3394
3395 /* HBA_IOQ_FLUSH is set while the HBA is being reset and driver-queued
3396 * commands are being flushed; it is cleared when the reset completes
3397 * and the rings are reestablished.
3398 */
3399 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
3400 atomic_inc(&tgtp->xmt_abort_rsp_error);
3401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3402 "6163 Driver in reset cleanup - flushing "
3403 "NVME Req now. hba_flag x%lx oxid x%x\n",
3404 phba->hba_flag, ctxp->oxid);
3405 lpfc_sli_release_iocbq(phba, abts_wqeq);
3406 spin_lock_irqsave(&ctxp->ctxlock, flags);
3407 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3408 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3409 return 0;
3410 }
3411
3412 spin_lock_irqsave(&phba->hbalock, flags);
3413 /* Outstanding abort is in progress */
3414 if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
3415 spin_unlock_irqrestore(&phba->hbalock, flags);
3416 atomic_inc(&tgtp->xmt_abort_rsp_error);
3417 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3418 "6164 Outstanding NVME I/O Abort Request "
3419 "still pending on oxid x%x\n",
3420 ctxp->oxid);
3421 lpfc_sli_release_iocbq(phba, abts_wqeq);
3422 spin_lock_irqsave(&ctxp->ctxlock, flags);
3423 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3424 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3425 return 0;
3426 }
3427
3428 /* Ready - mark outstanding as aborted by driver. */
3429 abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
3430
3431 lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
3432 abts_wqeq->iotag, CLASS3,
3433 LPFC_WQE_CQ_ID_DEFAULT, ia, true);
3434
3435 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3436 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3437 abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3438 abts_wqeq->cmd_flag |= LPFC_IO_NVME;
3439 abts_wqeq->context_un.axchg = ctxp;
3440 abts_wqeq->vport = phba->pport;
3441 if (!ctxp->hdwq)
3442 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3443
3444 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3445 spin_unlock_irqrestore(&phba->hbalock, flags);
3446 if (rc == WQE_SUCCESS) {
3447 atomic_inc(&tgtp->xmt_abort_sol);
3448 return 0;
3449 }
3450
3451 atomic_inc(&tgtp->xmt_abort_rsp_error);
3452 spin_lock_irqsave(&ctxp->ctxlock, flags);
3453 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3454 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3455 lpfc_sli_release_iocbq(phba, abts_wqeq);
3456 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3457 "6166 Failed ABORT issue_wqe with status x%x "
3458 "for oxid x%x.\n",
3459 rc, ctxp->oxid);
3460 return 1;
3461 }
3462
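/**
 * lpfc_nvmet_unsol_fcp_issue_abort - abort an unsolicited NVMET FCP exchange
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the exchange context to be aborted
 * @sid: FC_ID of the remote (initiator) port
 * @xri: exchange id of the exchange being aborted
 *
 * Reuses the exchange's own iocbq (ctxp->ctxbuf->iocbq) to send the ABTS
 * built by lpfc_nvmet_unsol_issue_abort(). Returns 0 on successful
 * submission, 1 when the abort cannot be issued; in the failure case the
 * context buffer is posted back to the free pool if it was pending release.
 **/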
3463 static int
3464 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3465 struct lpfc_async_xchg_ctx *ctxp,
3466 uint32_t sid, uint16_t xri)
3467 {
3468 struct lpfc_nvmet_tgtport *tgtp;
3469 struct lpfc_iocbq *abts_wqeq;
3470 unsigned long flags;
3471 bool released = false;
3472 int rc;
3473
3474 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3475 if (!ctxp->wqeq) {
3476 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3477 ctxp->wqeq->hba_wqidx = 0;
3478 }
3479
3480 if (ctxp->state == LPFC_NVME_STE_FREE) {
3481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3482 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3483 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3484 rc = WQE_BUSY;
3485 goto aerr;
3486 }
3487 ctxp->state = LPFC_NVME_STE_ABORT;
3488 ctxp->entry_cnt++;
3489 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3490 if (rc == 0)
3491 goto aerr;
3492
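/* The ABTS WQE has been built; submit it on the exchange's hardware
 * queue with the NVMET abort completion handler.
 */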
3493 spin_lock_irqsave(&phba->hbalock, flags);
3494 abts_wqeq = ctxp->wqeq;
3495 abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3496 abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
3497 if (!ctxp->hdwq)
3498 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3499
3500 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3501 spin_unlock_irqrestore(&phba->hbalock, flags);
3502 if (rc == WQE_SUCCESS) {
3503 return 0;
3504 }
3505
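/* The abort could not be issued: clear the abort/release flags and,
 * if the context was queued for release, take it off the ABTS context
 * list and post the context buffer back to the free pool.
 */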
3506 aerr:
3507 spin_lock_irqsave(&ctxp->ctxlock, flags);
3508 if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3509 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3510 list_del_init(&ctxp->list);
3511 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3512 released = true;
3513 }
3514 ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
3515 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3516
3517 atomic_inc(&tgtp->xmt_abort_rsp_error);
3518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3519 "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3520 "(%x)\n",
3521 ctxp->oxid, rc, released);
3522 if (released)
3523 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3524 return 1;
3525 }
3526
3527 /**
3528 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
3529 * via async frame receive where the frame is not handled.
3530 * @phba: pointer to adapter structure
3531 * @ctxp: pointer to the asynchronously received sequence
3532 * @sid: address of the remote port to send the ABTS to
3533 * @xri: oxid value for the ABTS (the other side's exchange id).
3534 **/
3535 int
3536 lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
3537 struct lpfc_async_xchg_ctx *ctxp,
3538 uint32_t sid, uint16_t xri)
3539 {
3540 struct lpfc_nvmet_tgtport *tgtp = NULL;
3541 struct lpfc_iocbq *abts_wqeq;
3542 unsigned long flags;
3543 int rc;
3544
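/* An LS abort is expected either right after the LS request was
 * received (entry_cnt 1) or after its response was queued (entry_cnt 2);
 * any other state is logged, but the abort still proceeds.
 */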
3545 if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3546 (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3547 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3548 ctxp->entry_cnt++;
3549 } else {
3550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3551 "6418 NVMET LS abort state mismatch "
3552 "IO x%x: %d %d\n",
3553 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3554 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3555 }
3556
3557 if (phba->nvmet_support && phba->targetport)
3558 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3559
3560 if (!ctxp->wqeq) {
3561 /* Issue ABTS for this WQE based on iotag */
3562 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3563 if (!ctxp->wqeq) {
3564 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3565 "6068 Abort failed: No wqeqs: "
3566 "xri: x%x\n", xri);
3567 /* Never fail an ABTS request back to the caller. */
3568 kfree(ctxp);
3569 return 0;
3570 }
3571 }
3572 abts_wqeq = ctxp->wqeq;
3573
3574 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3575 rc = WQE_BUSY;
3576 goto out;
3577 }
3578
3579 spin_lock_irqsave(&phba->hbalock, flags);
3580 abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3581 abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
3582 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3583 spin_unlock_irqrestore(&phba->hbalock, flags);
3584 if (rc == WQE_SUCCESS) {
3585 if (tgtp)
3586 atomic_inc(&tgtp->xmt_abort_unsol);
3587 return 0;
3588 }
3589 out:
3590 if (tgtp)
3591 atomic_inc(&tgtp->xmt_abort_rsp_error);
3592 abts_wqeq->rsp_dmabuf = NULL;
3593 abts_wqeq->bpl_dmabuf = NULL;
3594 lpfc_sli_release_iocbq(phba, abts_wqeq);
3595 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3596 "6056 Failed to Issue ABTS. Status x%x\n", rc);
3597 return 1;
3598 }
3599
3600 /**
3601 * lpfc_nvmet_invalidate_host - invalidate an NVME host that had active connections
3602 *
3603 * @phba: pointer to the driver instance bound to an adapter port.
3604 * @ndlp: pointer to an lpfc_nodelist type
3605 *
3606 * This routine upcalls the nvmet transport to invalidate an NVME
3607 * host to which this target instance had active connections.
3608 */
3609 void
3610 lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3611 {
3612 u32 ndlp_has_hh;
3613 struct lpfc_nvmet_tgtport *tgtp;
3614
3615 lpfc_printf_log(phba, KERN_INFO,
3616 LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3617 "6203 Invalidating hosthandle x%px\n",
3618 ndlp);
3619
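/* Note on the targetport that a host invalidation is in progress
 * before making the nvmet transport upcall.
 */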
3620 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3621 atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
3622
3623 spin_lock_irq(&ndlp->lock);
3624 ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
3625 spin_unlock_irq(&ndlp->lock);
3626
3627 /* Do not invalidate any nodes that do not have a hosthandle.
3628 * The host_release callback would cause a node reference
3629 * count imbalance and a crash.
3630 */
3631 if (!ndlp_has_hh) {
3632 lpfc_printf_log(phba, KERN_INFO,
3633 LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3634 "6204 Skip invalidate on node x%px DID x%x\n",
3635 ndlp, ndlp->nlp_DID);
3636 return;
3637 }
3638
3639 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3640 /* Need to get the nvmet_fc_target_port pointer here. */
3641 nvmet_fc_invalidate_host(phba->targetport, ndlp);
3642 #endif
3643 }
3644