1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 
24 #include <linux/ctype.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/aer.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
32 
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_fs.h>
39 
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_version.h"
51 #include "lpfc_compat.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_attr.h"
55 
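/* devloss timeout bounds, in seconds, applied to the fc_host dev_loss_tmo value */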
56 #define LPFC_DEF_DEVLOSS_TMO	30
57 #define LPFC_MIN_DEVLOSS_TMO	1
58 #define LPFC_MAX_DEVLOSS_TMO	255
59 
60 /*
61  * Write key size should be multiple of 4. If write key is changed
62  * make sure that library write key is also changed.
63  */
64 #define LPFC_REG_WRITE_KEY_SIZE	4
65 #define LPFC_REG_WRITE_KEY	"EMLX"
66 
67 const char *const trunk_errmsg[] = {	/* map errcode */
68 	"",	/* There is no such error code at index 0*/
69 	"link negotiated speed does not match existing"
70 		" trunk - link was \"low\" speed",
71 	"link negotiated speed does not match"
72 		" existing trunk - link was \"middle\" speed",
73 	"link negotiated speed does not match existing"
74 		" trunk - link was \"high\" speed",
75 	"Attached to non-trunking port - F_Port",
76 	"Attached to non-trunking port - N_Port",
77 	"FLOGI response timeout",
78 	"non-FLOGI frame received",
79 	"Invalid FLOGI response",
80 	"Trunking initialization protocol",
81 	"Trunk peer device mismatch",
82 };
83 
84 /**
85  * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
86  * @incr: integer to convert.
87  * @hdw: ascii string holding converted integer plus a string terminator.
88  *
89  * Description:
90  * JEDEC Joint Electron Device Engineering Council.
91  * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
92  * character string. The string is then terminated with a NULL in byte 9.
93  * Hex 0-9 becomes ascii '0' to '9'.
94  * Hex a-f becomes ascii 'a' to 'f'.
95  *
96  * Notes:
97  * Coded for 32 bit integers only.
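 * For example, the value 0x1a2b3c4d is rendered as the ascii string "1a2b3c4d".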
98  **/
99 static void
100 lpfc_jedec_to_ascii(int incr, char hdw[])
101 {
102 	int i, j;
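	/* Convert one nibble per pass, least significant first, filling hdw[] from the end */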
103 	for (i = 0; i < 8; i++) {
104 		j = (incr & 0xf);
105 		if (j <= 9)
106 			hdw[7 - i] = 0x30 +  j;
107 		 else
108 			hdw[7 - i] = 0x61 + j - 10;
109 		incr = (incr >> 4);
110 	}
111 	hdw[8] = 0;
112 	return;
113 }
114 
115 /**
116  * lpfc_drvr_version_show - Return the Emulex driver string with version number
117  * @dev: class unused variable.
118  * @attr: device attribute, not used.
119  * @buf: on return contains the module description text.
120  *
121  * Returns: size of formatted string.
122  **/
123 static ssize_t
124 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
125 		       char *buf)
126 {
127 	return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
128 }
129 
130 /**
131  * lpfc_enable_fip_show - Return the fip mode of the HBA
132  * @dev: class unused variable.
133  * @attr: device attribute, not used.
134  * @buf: on return contains the module description text.
135  *
136  * Returns: size of formatted string.
137  **/
138 static ssize_t
139 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
140 		       char *buf)
141 {
142 	struct Scsi_Host *shost = class_to_shost(dev);
143 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
144 	struct lpfc_hba   *phba = vport->phba;
145 
146 	if (phba->hba_flag & HBA_FIP_SUPPORT)
147 		return scnprintf(buf, PAGE_SIZE, "1\n");
148 	else
149 		return scnprintf(buf, PAGE_SIZE, "0\n");
150 }
151 
152 static ssize_t
153 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
154 		    char *buf)
155 {
156 	struct Scsi_Host *shost = class_to_shost(dev);
157 	struct lpfc_vport *vport = shost_priv(shost);
158 	struct lpfc_hba   *phba = vport->phba;
159 	struct lpfc_nvmet_tgtport *tgtp;
160 	struct nvme_fc_local_port *localport;
161 	struct lpfc_nvme_lport *lport;
162 	struct lpfc_nvme_rport *rport;
163 	struct lpfc_nodelist *ndlp;
164 	struct nvme_fc_remote_port *nrport;
165 	struct lpfc_fc4_ctrl_stat *cstat;
166 	uint64_t data1, data2, data3;
167 	uint64_t totin, totout, tot;
168 	char *statep;
169 	int i;
170 	int len = 0;
171 	char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
172 
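	/*
	 * Each section below is formatted into tmp[] and appended with
	 * strlcat(); once the accumulated length reaches PAGE_SIZE the sysfs
	 * buffer is full and we jump to buffer_done.
	 */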
173 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
174 		len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
175 		return len;
176 	}
177 	if (phba->nvmet_support) {
178 		if (!phba->targetport) {
179 			len = scnprintf(buf, PAGE_SIZE,
180 					"NVME Target: x%llx is not allocated\n",
181 					wwn_to_u64(vport->fc_portname.u.wwn));
182 			return len;
183 		}
184 		/* Port state is only one of two values for now. */
185 		if (phba->targetport->port_id)
186 			statep = "REGISTERED";
187 		else
188 			statep = "INIT";
189 		scnprintf(tmp, sizeof(tmp),
190 			  "NVME Target Enabled  State %s\n",
191 			  statep);
192 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
193 			goto buffer_done;
194 
195 		scnprintf(tmp, sizeof(tmp),
196 			  "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
197 			  "NVME Target: lpfc",
198 			  phba->brd_no,
199 			  wwn_to_u64(vport->fc_portname.u.wwn),
200 			  wwn_to_u64(vport->fc_nodename.u.wwn),
201 			  phba->targetport->port_id);
202 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
203 			goto buffer_done;
204 
205 		if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
206 		    >= PAGE_SIZE)
207 			goto buffer_done;
208 
209 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
210 		scnprintf(tmp, sizeof(tmp),
211 			  "LS: Rcv %08x Drop %08x Abort %08x\n",
212 			  atomic_read(&tgtp->rcv_ls_req_in),
213 			  atomic_read(&tgtp->rcv_ls_req_drop),
214 			  atomic_read(&tgtp->xmt_ls_abort));
215 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
216 			goto buffer_done;
217 
218 		if (atomic_read(&tgtp->rcv_ls_req_in) !=
219 		    atomic_read(&tgtp->rcv_ls_req_out)) {
220 			scnprintf(tmp, sizeof(tmp),
221 				  "Rcv LS: in %08x != out %08x\n",
222 				  atomic_read(&tgtp->rcv_ls_req_in),
223 				  atomic_read(&tgtp->rcv_ls_req_out));
224 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
225 				goto buffer_done;
226 		}
227 
228 		scnprintf(tmp, sizeof(tmp),
229 			  "LS: Xmt %08x Drop %08x Cmpl %08x\n",
230 			  atomic_read(&tgtp->xmt_ls_rsp),
231 			  atomic_read(&tgtp->xmt_ls_drop),
232 			  atomic_read(&tgtp->xmt_ls_rsp_cmpl));
233 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
234 			goto buffer_done;
235 
236 		scnprintf(tmp, sizeof(tmp),
237 			  "LS: RSP Abort %08x xb %08x Err %08x\n",
238 			  atomic_read(&tgtp->xmt_ls_rsp_aborted),
239 			  atomic_read(&tgtp->xmt_ls_rsp_xb_set),
240 			  atomic_read(&tgtp->xmt_ls_rsp_error));
241 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
242 			goto buffer_done;
243 
244 		scnprintf(tmp, sizeof(tmp),
245 			  "FCP: Rcv %08x Defer %08x Release %08x "
246 			  "Drop %08x\n",
247 			  atomic_read(&tgtp->rcv_fcp_cmd_in),
248 			  atomic_read(&tgtp->rcv_fcp_cmd_defer),
249 			  atomic_read(&tgtp->xmt_fcp_release),
250 			  atomic_read(&tgtp->rcv_fcp_cmd_drop));
251 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
252 			goto buffer_done;
253 
254 		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
255 		    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
256 			scnprintf(tmp, sizeof(tmp),
257 				  "Rcv FCP: in %08x != out %08x\n",
258 				  atomic_read(&tgtp->rcv_fcp_cmd_in),
259 				  atomic_read(&tgtp->rcv_fcp_cmd_out));
260 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
261 				goto buffer_done;
262 		}
263 
264 		scnprintf(tmp, sizeof(tmp),
265 			  "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
266 			  "drop %08x\n",
267 			  atomic_read(&tgtp->xmt_fcp_read),
268 			  atomic_read(&tgtp->xmt_fcp_read_rsp),
269 			  atomic_read(&tgtp->xmt_fcp_write),
270 			  atomic_read(&tgtp->xmt_fcp_rsp),
271 			  atomic_read(&tgtp->xmt_fcp_drop));
272 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
273 			goto buffer_done;
274 
275 		scnprintf(tmp, sizeof(tmp),
276 			  "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
277 			  atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
278 			  atomic_read(&tgtp->xmt_fcp_rsp_error),
279 			  atomic_read(&tgtp->xmt_fcp_rsp_drop));
280 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
281 			goto buffer_done;
282 
283 		scnprintf(tmp, sizeof(tmp),
284 			  "FCP Rsp Abort: %08x xb %08x xricqe  %08x\n",
285 			  atomic_read(&tgtp->xmt_fcp_rsp_aborted),
286 			  atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
287 			  atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
288 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
289 			goto buffer_done;
290 
291 		scnprintf(tmp, sizeof(tmp),
292 			  "ABORT: Xmt %08x Cmpl %08x\n",
293 			  atomic_read(&tgtp->xmt_fcp_abort),
294 			  atomic_read(&tgtp->xmt_fcp_abort_cmpl));
295 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
296 			goto buffer_done;
297 
298 		scnprintf(tmp, sizeof(tmp),
299 			  "ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x\n",
300 			  atomic_read(&tgtp->xmt_abort_sol),
301 			  atomic_read(&tgtp->xmt_abort_unsol),
302 			  atomic_read(&tgtp->xmt_abort_rsp),
303 			  atomic_read(&tgtp->xmt_abort_rsp_error));
304 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
305 			goto buffer_done;
306 
307 		scnprintf(tmp, sizeof(tmp),
308 			  "DELAY: ctx %08x  fod %08x wqfull %08x\n",
309 			  atomic_read(&tgtp->defer_ctx),
310 			  atomic_read(&tgtp->defer_fod),
311 			  atomic_read(&tgtp->defer_wqfull));
312 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
313 			goto buffer_done;
314 
315 		/* Calculate outstanding IOs */
316 		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
317 		tot += atomic_read(&tgtp->xmt_fcp_release);
318 		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
319 
320 		scnprintf(tmp, sizeof(tmp),
321 			  "IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
322 			  "CTX Outstanding %08llx\n\n",
323 			  phba->sli4_hba.nvmet_xri_cnt,
324 			  phba->sli4_hba.nvmet_io_wait_cnt,
325 			  phba->sli4_hba.nvmet_io_wait_total,
326 			  tot);
327 		strlcat(buf, tmp, PAGE_SIZE);
328 		goto buffer_done;
329 	}
330 
331 	localport = vport->localport;
332 	if (!localport) {
333 		len = scnprintf(buf, PAGE_SIZE,
334 				"NVME Initiator x%llx is not allocated\n",
335 				wwn_to_u64(vport->fc_portname.u.wwn));
336 		return len;
337 	}
338 	lport = (struct lpfc_nvme_lport *)localport->private;
339 	if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
340 		goto buffer_done;
341 
342 	scnprintf(tmp, sizeof(tmp),
343 		  "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
344 		  phba->brd_no,
345 		  phba->sli4_hba.max_cfg_param.max_xri,
346 		  phba->sli4_hba.io_xri_max,
347 		  lpfc_sli4_get_els_iocb_cnt(phba));
348 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
349 		goto buffer_done;
350 
351 	/* Port state is only one of two values for now. */
352 	if (localport->port_id)
353 		statep = "ONLINE";
354 	else
355 		statep = "UNKNOWN ";
356 
357 	scnprintf(tmp, sizeof(tmp),
358 		  "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
359 		  "NVME LPORT lpfc",
360 		  phba->brd_no,
361 		  wwn_to_u64(vport->fc_portname.u.wwn),
362 		  wwn_to_u64(vport->fc_nodename.u.wwn),
363 		  localport->port_id, statep);
364 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
365 		goto buffer_done;
366 
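	/*
	 * Walk the vport node list under the host lock; each node's remote
	 * port pointer is sampled under the per-node lock before formatting.
	 */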
367 	spin_lock_irq(shost->host_lock);
368 
369 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
370 		nrport = NULL;
371 		spin_lock(&ndlp->lock);
372 		rport = lpfc_ndlp_get_nrport(ndlp);
373 		if (rport)
374 			nrport = rport->remoteport;
375 		spin_unlock(&ndlp->lock);
376 		if (!nrport)
377 			continue;
378 
379 		/* Port state is only one of two values for now. */
380 		switch (nrport->port_state) {
381 		case FC_OBJSTATE_ONLINE:
382 			statep = "ONLINE";
383 			break;
384 		case FC_OBJSTATE_UNKNOWN:
385 			statep = "UNKNOWN ";
386 			break;
387 		default:
388 			statep = "UNSUPPORTED";
389 			break;
390 		}
391 
392 		/* Tab in to show lport ownership. */
393 		if (strlcat(buf, "NVME RPORT       ", PAGE_SIZE) >= PAGE_SIZE)
394 			goto unlock_buf_done;
395 		if (phba->brd_no >= 10) {
396 			if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
397 				goto unlock_buf_done;
398 		}
399 
400 		scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
401 			  nrport->port_name);
402 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
403 			goto unlock_buf_done;
404 
405 		scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
406 			  nrport->node_name);
407 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
408 			goto unlock_buf_done;
409 
410 		scnprintf(tmp, sizeof(tmp), "DID x%06x ",
411 			  nrport->port_id);
412 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
413 			goto unlock_buf_done;
414 
415 		/* An NVME rport can have multiple roles. */
416 		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
417 			if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
418 				goto unlock_buf_done;
419 		}
420 		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
421 			if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
422 				goto unlock_buf_done;
423 		}
424 		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
425 			if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
426 				goto unlock_buf_done;
427 		}
428 		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
429 					  FC_PORT_ROLE_NVME_TARGET |
430 					  FC_PORT_ROLE_NVME_DISCOVERY)) {
431 			scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
432 				  nrport->port_role);
433 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
434 				goto unlock_buf_done;
435 		}
436 
437 		scnprintf(tmp, sizeof(tmp), "%s\n", statep);
438 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
439 			goto unlock_buf_done;
440 	}
441 	spin_unlock_irq(shost->host_lock);
442 
443 	if (!lport)
444 		goto buffer_done;
445 
446 	if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
447 		goto buffer_done;
448 
449 	scnprintf(tmp, sizeof(tmp),
450 		  "LS: Xmt %010x Cmpl %010x Abort %08x\n",
451 		  atomic_read(&lport->fc4NvmeLsRequests),
452 		  atomic_read(&lport->fc4NvmeLsCmpls),
453 		  atomic_read(&lport->xmt_ls_abort));
454 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
455 		goto buffer_done;
456 
457 	scnprintf(tmp, sizeof(tmp),
458 		  "LS XMIT: Err %08x  CMPL: xb %08x Err %08x\n",
459 		  atomic_read(&lport->xmt_ls_err),
460 		  atomic_read(&lport->cmpl_ls_xb),
461 		  atomic_read(&lport->cmpl_ls_err));
462 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
463 		goto buffer_done;
464 
465 	totin = 0;
466 	totout = 0;
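	/* Sum NVME completions and issued requests across all hardware queues */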
467 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
468 		cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
469 		tot = cstat->io_cmpls;
470 		totin += tot;
471 		data1 = cstat->input_requests;
472 		data2 = cstat->output_requests;
473 		data3 = cstat->control_requests;
474 		totout += (data1 + data2 + data3);
475 	}
476 	scnprintf(tmp, sizeof(tmp),
477 		  "Total FCP Cmpl %016llx Issue %016llx "
478 		  "OutIO %016llx\n",
479 		  totin, totout, totout - totin);
480 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
481 		goto buffer_done;
482 
483 	scnprintf(tmp, sizeof(tmp),
484 		  "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
485 		  "wqerr %08x err %08x\n",
486 		  atomic_read(&lport->xmt_fcp_abort),
487 		  atomic_read(&lport->xmt_fcp_noxri),
488 		  atomic_read(&lport->xmt_fcp_bad_ndlp),
489 		  atomic_read(&lport->xmt_fcp_qdepth),
490 		  atomic_read(&lport->xmt_fcp_wqerr),
491 		  atomic_read(&lport->xmt_fcp_err));
492 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
493 		goto buffer_done;
494 
495 	scnprintf(tmp, sizeof(tmp),
496 		  "FCP CMPL: xb %08x Err %08x\n",
497 		  atomic_read(&lport->cmpl_fcp_xb),
498 		  atomic_read(&lport->cmpl_fcp_err));
499 	strlcat(buf, tmp, PAGE_SIZE);
500 
501 	/* host_lock is already unlocked. */
502 	goto buffer_done;
503 
504  unlock_buf_done:
505 	spin_unlock_irq(shost->host_lock);
506 
507  buffer_done:
508 	len = strnlen(buf, PAGE_SIZE);
509 
510 	if (unlikely(len >= (PAGE_SIZE - 1))) {
511 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
512 				"6314 Catching potential buffer "
513 				"overflow > PAGE_SIZE = %lu bytes\n",
514 				PAGE_SIZE);
515 		strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
516 			LPFC_NVME_INFO_MORE_STR,
517 			sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
518 	}
519 
520 	return len;
521 }
522 
523 static ssize_t
524 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
525 		    char *buf)
526 {
527 	struct Scsi_Host *shost = class_to_shost(dev);
528 	struct lpfc_vport *vport = shost_priv(shost);
529 	struct lpfc_hba *phba = vport->phba;
530 	int len;
531 	struct lpfc_fc4_ctrl_stat *cstat;
532 	u64 data1, data2, data3;
533 	u64 tot, totin, totout;
534 	int i;
535 	char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
536 
537 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
538 	    (phba->sli_rev != LPFC_SLI_REV4))
539 		return 0;
540 
541 	scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
542 
543 	totin = 0;
544 	totout = 0;
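	/* Accumulate per-hardware-queue SCSI counters and print one line per queue */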
545 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
546 		cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
547 		tot = cstat->io_cmpls;
548 		totin += tot;
549 		data1 = cstat->input_requests;
550 		data2 = cstat->output_requests;
551 		data3 = cstat->control_requests;
552 		totout += (data1 + data2 + data3);
553 
554 		scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
555 			  "IO %016llx ", i, data1, data2, data3);
556 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
557 			goto buffer_done;
558 
559 		scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
560 			  tot, ((data1 + data2 + data3) - tot));
561 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
562 			goto buffer_done;
563 	}
564 	scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
565 		  "OutIO %016llx\n", totin, totout, totout - totin);
566 	strlcat(buf, tmp, PAGE_SIZE);
567 
568 buffer_done:
569 	len = strnlen(buf, PAGE_SIZE);
570 
571 	return len;
572 }
573 
574 static ssize_t
575 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
576 		  char *buf)
577 {
578 	struct Scsi_Host *shost = class_to_shost(dev);
579 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
580 	struct lpfc_hba   *phba = vport->phba;
581 
582 	if (phba->cfg_enable_bg) {
583 		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
584 			return scnprintf(buf, PAGE_SIZE,
585 					"BlockGuard Enabled\n");
586 		else
587 			return scnprintf(buf, PAGE_SIZE,
588 					"BlockGuard Not Supported\n");
589 	} else
590 		return scnprintf(buf, PAGE_SIZE,
591 					"BlockGuard Disabled\n");
592 }
593 
594 static ssize_t
595 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
596 		       char *buf)
597 {
598 	struct Scsi_Host *shost = class_to_shost(dev);
599 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
600 	struct lpfc_hba   *phba = vport->phba;
601 
602 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
603 			(unsigned long long)phba->bg_guard_err_cnt);
604 }
605 
606 static ssize_t
607 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
608 			char *buf)
609 {
610 	struct Scsi_Host *shost = class_to_shost(dev);
611 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
612 	struct lpfc_hba   *phba = vport->phba;
613 
614 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
615 			(unsigned long long)phba->bg_apptag_err_cnt);
616 }
617 
618 static ssize_t
619 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
620 			char *buf)
621 {
622 	struct Scsi_Host *shost = class_to_shost(dev);
623 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
624 	struct lpfc_hba   *phba = vport->phba;
625 
626 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
627 			(unsigned long long)phba->bg_reftag_err_cnt);
628 }
629 
630 /**
631  * lpfc_info_show - Return some pci info about the host in ascii
632  * @dev: class converted to a Scsi_host structure.
633  * @attr: device attribute, not used.
634  * @buf: on return contains the formatted text from lpfc_info().
635  *
636  * Returns: size of formatted string.
637  **/
638 static ssize_t
639 lpfc_info_show(struct device *dev, struct device_attribute *attr,
640 	       char *buf)
641 {
642 	struct Scsi_Host *host = class_to_shost(dev);
643 
644 	return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
645 }
646 
647 /**
648  * lpfc_serialnum_show - Return the hba serial number in ascii
649  * @dev: class converted to a Scsi_host structure.
650  * @attr: device attribute, not used.
651  * @buf: on return contains the formatted text serial number.
652  *
653  * Returns: size of formatted string.
654  **/
655 static ssize_t
656 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
657 		    char *buf)
658 {
659 	struct Scsi_Host  *shost = class_to_shost(dev);
660 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
661 	struct lpfc_hba   *phba = vport->phba;
662 
663 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
664 }
665 
666 /**
667  * lpfc_temp_sensor_show - Return the temperature sensor level
668  * @dev: class converted to a Scsi_host structure.
669  * @attr: device attribute, not used.
670  * @buf: on return contains the formatted support level.
671  *
672  * Description:
673  * Returns a number indicating the temperature sensor level currently
674  * supported, zero or one in ascii.
675  *
676  * Returns: size of formatted string.
677  **/
678 static ssize_t
679 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
680 		      char *buf)
681 {
682 	struct Scsi_Host *shost = class_to_shost(dev);
683 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
684 	struct lpfc_hba   *phba = vport->phba;
685 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
686 }
687 
688 /**
689  * lpfc_modeldesc_show - Return the model description of the hba
690  * @dev: class converted to a Scsi_host structure.
691  * @attr: device attribute, not used.
692  * @buf: on return contains the scsi vpd model description.
693  *
694  * Returns: size of formatted string.
695  **/
696 static ssize_t
697 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
698 		    char *buf)
699 {
700 	struct Scsi_Host  *shost = class_to_shost(dev);
701 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
702 	struct lpfc_hba   *phba = vport->phba;
703 
704 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
705 }
706 
707 /**
708  * lpfc_modelname_show - Return the model name of the hba
709  * @dev: class converted to a Scsi_host structure.
710  * @attr: device attribute, not used.
711  * @buf: on return contains the scsi vpd model name.
712  *
713  * Returns: size of formatted string.
714  **/
715 static ssize_t
716 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
717 		    char *buf)
718 {
719 	struct Scsi_Host  *shost = class_to_shost(dev);
720 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
721 	struct lpfc_hba   *phba = vport->phba;
722 
723 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
724 }
725 
726 /**
727  * lpfc_programtype_show - Return the program type of the hba
728  * @dev: class converted to a Scsi_host structure.
729  * @attr: device attribute, not used.
730  * @buf: on return contains the scsi vpd program type.
731  *
732  * Returns: size of formatted string.
733  **/
734 static ssize_t
735 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
736 		      char *buf)
737 {
738 	struct Scsi_Host  *shost = class_to_shost(dev);
739 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
740 	struct lpfc_hba   *phba = vport->phba;
741 
742 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
743 }
744 
745 /**
746  * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
747  * @dev: class converted to a Scsi_host structure.
748  * @attr: device attribute, not used.
749  * @buf: on return contains the Menlo Maintenance sli flag.
750  *
751  * Returns: size of formatted string.
752  **/
753 static ssize_t
754 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
755 {
756 	struct Scsi_Host  *shost = class_to_shost(dev);
757 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
758 	struct lpfc_hba   *phba = vport->phba;
759 
760 	return scnprintf(buf, PAGE_SIZE, "%d\n",
761 		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
762 }
763 
764 /**
765  * lpfc_vportnum_show - Return the port number in ascii of the hba
766  * @dev: class converted to a Scsi_host structure.
767  * @attr: device attribute, not used.
768  * @buf: on return contains scsi vpd program type.
769  *
770  * Returns: size of formatted string.
771  **/
772 static ssize_t
773 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
774 		   char *buf)
775 {
776 	struct Scsi_Host  *shost = class_to_shost(dev);
777 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
778 	struct lpfc_hba   *phba = vport->phba;
779 
780 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
781 }
782 
783 /**
784  * lpfc_fwrev_show - Return the firmware rev running in the hba
785  * @dev: class converted to a Scsi_host structure.
786  * @attr: device attribute, not used.
787  * @buf: on return contains the scsi vpd program type.
788  *
789  * Returns: size of formatted string.
790  **/
791 static ssize_t
792 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
793 		char *buf)
794 {
795 	struct Scsi_Host  *shost = class_to_shost(dev);
796 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
797 	struct lpfc_hba   *phba = vport->phba;
798 	uint32_t if_type;
799 	uint8_t sli_family;
800 	char fwrev[FW_REV_STR_SIZE];
801 	int len;
802 
803 	lpfc_decode_firmware_rev(phba, fwrev, 1);
804 	if_type = phba->sli4_hba.pc_sli4_params.if_type;
805 	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
806 
807 	if (phba->sli_rev < LPFC_SLI_REV4)
808 		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
809 			       fwrev, phba->sli_rev);
810 	else
811 		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
812 			       fwrev, phba->sli_rev, if_type, sli_family);
813 
814 	return len;
815 }
816 
817 /**
818  * lpfc_hdw_show - Return the jedec information about the hba
819  * @dev: class converted to a Scsi_host structure.
820  * @attr: device attribute, not used.
821  * @buf: on return contains the scsi vpd program type.
822  *
823  * Returns: size of formatted string.
824  **/
825 static ssize_t
826 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
827 {
828 	char hdw[9];
829 	struct Scsi_Host  *shost = class_to_shost(dev);
830 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
831 	struct lpfc_hba   *phba = vport->phba;
832 	lpfc_vpd_t *vp = &phba->vpd;
833 
834 	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
835 	return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
836 			 vp->rev.smRev, vp->rev.smFwRev);
837 }
838 
839 /**
840  * lpfc_option_rom_version_show - Return the adapter ROM FCode version
841  * @dev: class converted to a Scsi_host structure.
842  * @attr: device attribute, not used.
843  * @buf: on return contains the ROM and FCode ascii strings.
844  *
845  * Returns: size of formatted string.
846  **/
847 static ssize_t
848 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
849 			     char *buf)
850 {
851 	struct Scsi_Host  *shost = class_to_shost(dev);
852 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
853 	struct lpfc_hba   *phba = vport->phba;
854 	char fwrev[FW_REV_STR_SIZE];
855 
856 	if (phba->sli_rev < LPFC_SLI_REV4)
857 		return scnprintf(buf, PAGE_SIZE, "%s\n",
858 				phba->OptionROMVersion);
859 
860 	lpfc_decode_firmware_rev(phba, fwrev, 1);
861 	return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
862 }
863 
864 /**
865  * lpfc_link_state_show - Return the link state of the port
866  * @dev: class converted to a Scsi_host structure.
867  * @attr: device attribute, not used.
868  * @buf: on return contains text describing the state of the link.
869  *
870  * Notes:
871  * The switch statement has no default so zero will be returned.
872  *
873  * Returns: size of formatted string.
874  **/
875 static ssize_t
876 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
877 		     char *buf)
878 {
879 	struct Scsi_Host  *shost = class_to_shost(dev);
880 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
881 	struct lpfc_hba   *phba = vport->phba;
882 	int  len = 0;
883 
884 	switch (phba->link_state) {
885 	case LPFC_LINK_UNKNOWN:
886 	case LPFC_WARM_START:
887 	case LPFC_INIT_START:
888 	case LPFC_INIT_MBX_CMDS:
889 	case LPFC_LINK_DOWN:
890 	case LPFC_HBA_ERROR:
891 		if (phba->hba_flag & LINK_DISABLED)
892 			len += scnprintf(buf + len, PAGE_SIZE-len,
893 				"Link Down - User disabled\n");
894 		else
895 			len += scnprintf(buf + len, PAGE_SIZE-len,
896 				"Link Down\n");
897 		break;
898 	case LPFC_LINK_UP:
899 	case LPFC_CLEAR_LA:
900 	case LPFC_HBA_READY:
901 		len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
902 
903 		switch (vport->port_state) {
904 		case LPFC_LOCAL_CFG_LINK:
905 			len += scnprintf(buf + len, PAGE_SIZE-len,
906 					"Configuring Link\n");
907 			break;
908 		case LPFC_FDISC:
909 		case LPFC_FLOGI:
910 		case LPFC_FABRIC_CFG_LINK:
911 		case LPFC_NS_REG:
912 		case LPFC_NS_QRY:
913 		case LPFC_BUILD_DISC_LIST:
914 		case LPFC_DISC_AUTH:
915 			len += scnprintf(buf + len, PAGE_SIZE - len,
916 					"Discovery\n");
917 			break;
918 		case LPFC_VPORT_READY:
919 			len += scnprintf(buf + len, PAGE_SIZE - len,
920 					"Ready\n");
921 			break;
922 
923 		case LPFC_VPORT_FAILED:
924 			len += scnprintf(buf + len, PAGE_SIZE - len,
925 					"Failed\n");
926 			break;
927 
928 		case LPFC_VPORT_UNKNOWN:
929 			len += scnprintf(buf + len, PAGE_SIZE - len,
930 					"Unknown\n");
931 			break;
932 		}
933 		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
934 			len += scnprintf(buf + len, PAGE_SIZE-len,
935 					"   Menlo Maint Mode\n");
936 		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
937 			if (vport->fc_flag & FC_PUBLIC_LOOP)
938 				len += scnprintf(buf + len, PAGE_SIZE-len,
939 						"   Public Loop\n");
940 			else
941 				len += scnprintf(buf + len, PAGE_SIZE-len,
942 						"   Private Loop\n");
943 		} else {
944 			if (vport->fc_flag & FC_FABRIC)
945 				len += scnprintf(buf + len, PAGE_SIZE-len,
946 						"   Fabric\n");
947 			else
948 				len += scnprintf(buf + len, PAGE_SIZE-len,
949 						"   Point-2-Point\n");
950 		}
951 	}
952 
953 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
954 	    ((bf_get(lpfc_sli_intf_if_type,
955 	     &phba->sli4_hba.sli_intf) ==
956 	     LPFC_SLI_INTF_IF_TYPE_6))) {
957 		struct lpfc_trunk_link link = phba->trunk_link;
958 
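		/* Report state and any fault reason for each configured trunk port */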
959 		if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
960 			len += scnprintf(buf + len, PAGE_SIZE - len,
961 				"Trunk port 0: Link %s %s\n",
962 				(link.link0.state == LPFC_LINK_UP) ?
963 				 "Up" : "Down. ",
964 				trunk_errmsg[link.link0.fault]);
965 
966 		if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
967 			len += scnprintf(buf + len, PAGE_SIZE - len,
968 				"Trunk port 1: Link %s %s\n",
969 				(link.link1.state == LPFC_LINK_UP) ?
970 				 "Up" : "Down. ",
971 				trunk_errmsg[link.link1.fault]);
972 
973 		if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
974 			len += scnprintf(buf + len, PAGE_SIZE - len,
975 				"Trunk port 2: Link %s %s\n",
976 				(link.link2.state == LPFC_LINK_UP) ?
977 				 "Up" : "Down. ",
978 				trunk_errmsg[link.link2.fault]);
979 
980 		if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
981 			len += scnprintf(buf + len, PAGE_SIZE - len,
982 				"Trunk port 3: Link %s %s\n",
983 				(link.link3.state == LPFC_LINK_UP) ?
984 				 "Up" : "Down. ",
985 				trunk_errmsg[link.link3.fault]);
986 
987 	}
988 
989 	return len;
990 }
991 
992 /**
993  * lpfc_sli4_protocol_show - Return the fip mode of the HBA
994  * @dev: class unused variable.
995  * @attr: device attribute, not used.
996  * @buf: on return contains the module description text.
997  *
998  * Returns: size of formatted string.
999  **/
1000 static ssize_t
1001 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1002 			char *buf)
1003 {
1004 	struct Scsi_Host *shost = class_to_shost(dev);
1005 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1006 	struct lpfc_hba *phba = vport->phba;
1007 
1008 	if (phba->sli_rev < LPFC_SLI_REV4)
1009 		return scnprintf(buf, PAGE_SIZE, "fc\n");
1010 
1011 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1012 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1013 			return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1014 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1015 			return scnprintf(buf, PAGE_SIZE, "fc\n");
1016 	}
1017 	return scnprintf(buf, PAGE_SIZE, "unknown\n");
1018 }
1019 
1020 /**
1021  * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
1022  *			    (OAS) is supported.
1023  * @dev: class unused variable.
1024  * @attr: device attribute, not used.
1025  * @buf: on return contains the module description text.
1026  *
1027  * Returns: size of formatted string.
1028  **/
1029 static ssize_t
1030 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1031 			char *buf)
1032 {
1033 	struct Scsi_Host *shost = class_to_shost(dev);
1034 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1035 	struct lpfc_hba *phba = vport->phba;
1036 
1037 	return scnprintf(buf, PAGE_SIZE, "%d\n",
1038 			phba->sli4_hba.pc_sli4_params.oas_supported);
1039 }
1040 
1041 /**
1042  * lpfc_link_state_store - Transition the link_state on an HBA port
1043  * @dev: class device that is converted into a Scsi_host.
1044  * @attr: device attribute, not used.
1045  * @buf: containing the string "up" or "down".
1046  * @count: not used.
1047  *
1048  * Returns:
1049  * -EINVAL if the buffer is not "up" or "down"
1050  * return from link state change function if non-zero
1051  * length of the buf on success
1052  **/
1053 static ssize_t
1054 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1055 		const char *buf, size_t count)
1056 {
1057 	struct Scsi_Host  *shost = class_to_shost(dev);
1058 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1059 	struct lpfc_hba   *phba = vport->phba;
1060 
1061 	int status = -EINVAL;
1062 
1063 	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1064 			(phba->link_state == LPFC_LINK_DOWN))
1065 		status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1066 	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1067 			(phba->link_state >= LPFC_LINK_UP))
1068 		status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1069 
1070 	if (status == 0)
1071 		return strlen(buf);
1072 	else
1073 		return status;
1074 }
1075 
1076 /**
1077  * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
1078  * @dev: class device that is converted into a Scsi_host.
1079  * @attr: device attribute, not used.
1080  * @buf: on return contains the sum of fc mapped and unmapped.
1081  *
1082  * Description:
1083  * Returns the ascii text number of the sum of the fc mapped and unmapped
1084  * vport counts.
1085  *
1086  * Returns: size of formatted string.
1087  **/
1088 static ssize_t
1089 lpfc_num_discovered_ports_show(struct device *dev,
1090 			       struct device_attribute *attr, char *buf)
1091 {
1092 	struct Scsi_Host  *shost = class_to_shost(dev);
1093 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1094 
1095 	return scnprintf(buf, PAGE_SIZE, "%d\n",
1096 			vport->fc_map_cnt + vport->fc_unmap_cnt);
1097 }
1098 
1099 /**
1100  * lpfc_issue_lip - Misnomer, name carried over from long ago
1101  * @shost: Scsi_Host pointer.
1102  *
1103  * Description:
1104  * Bring the link down gracefully then re-init the link. The firmware will
1105  * re-init the fiber channel interface as required. Does not issue a LIP.
1106  *
1107  * Returns:
1108  * -EPERM port offline or management commands are being blocked
1109  * -ENOMEM cannot allocate memory for the mailbox command
1110  * -EIO error sending the mailbox command
1111  * zero for success
1112  **/
1113 static int
1114 lpfc_issue_lip(struct Scsi_Host *shost)
1115 {
1116 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1117 	struct lpfc_hba   *phba = vport->phba;
1118 	LPFC_MBOXQ_t *pmboxq;
1119 	int mbxstatus = MBXERR_ERROR;
1120 
1121 	/*
1122 	 * If the link is offline, disabled or BLOCK_MGMT_IO
1123 	 * it doesn't make any sense to allow issue_lip
1124 	 */
1125 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1126 	    (phba->hba_flag & LINK_DISABLED) ||
1127 	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1128 		return -EPERM;
1129 
1130 	pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
1131 
1132 	if (!pmboxq)
1133 		return -ENOMEM;
1134 
1135 	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1136 	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1137 	pmboxq->u.mb.mbxOwner = OWN_HOST;
1138 
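	/* Take the link down first; on success re-issue INIT_LINK to bring it back up */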
1139 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1140 
1141 	if ((mbxstatus == MBX_SUCCESS) &&
1142 	    (pmboxq->u.mb.mbxStatus == 0 ||
1143 	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1144 		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1145 		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1146 			       phba->cfg_link_speed);
1147 		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1148 						     phba->fc_ratov * 2);
1149 		if ((mbxstatus == MBX_SUCCESS) &&
1150 		    (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1151 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1152 					"2859 SLI authentication is required "
1153 					"for INIT_LINK but has not been done yet\n");
1154 	}
1155 
1156 	lpfc_set_loopback_flag(phba);
1157 	if (mbxstatus != MBX_TIMEOUT)
1158 		mempool_free(pmboxq, phba->mbox_mem_pool);
1159 
1160 	if (mbxstatus == MBXERR_ERROR)
1161 		return -EIO;
1162 
1163 	return 0;
1164 }
1165 
1166 int
1167 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1168 {
1169 	int cnt = 0;
1170 
1171 	spin_lock_irq(lock);
1172 	while (!list_empty(q)) {
1173 		spin_unlock_irq(lock);
1174 		msleep(20);
1175 		if (cnt++ > 250) {  /* 5 secs */
1176 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1177 					"0466 Outstanding IO when "
1178 					"bringing Adapter offline\n");
1179 				return 0;
1180 		}
1181 		spin_lock_irq(lock);
1182 	}
1183 	spin_unlock_irq(lock);
1184 	return 1;
1185 }
1186 
1187 /**
1188  * lpfc_do_offline - Issues a mailbox command to bring the link down
1189  * @phba: lpfc_hba pointer.
1190  * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
1191  *
1192  * Notes:
1193  * Assumes any error from lpfc_do_offline() will be negative.
1194  * Can wait up to 5 seconds for the port ring buffers count
1195  * to reach zero, prints a warning if it is not zero and continues.
1196  * lpfc_workq_post_event() returns a non-zero return code if call fails.
1197  *
1198  * Returns:
1199  * -ENOMEM error posting the event, -EIO if the offline event failed
1200  * zero for success
1201  **/
1202 static int
1203 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1204 {
1205 	struct completion online_compl;
1206 	struct lpfc_queue *qp = NULL;
1207 	struct lpfc_sli_ring *pring;
1208 	struct lpfc_sli *psli;
1209 	int status = 0;
1210 	int i;
1211 	int rc;
1212 
1213 	init_completion(&online_compl);
1214 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
1215 			      LPFC_EVT_OFFLINE_PREP);
1216 	if (rc == 0)
1217 		return -ENOMEM;
1218 
1219 	wait_for_completion(&online_compl);
1220 
1221 	if (status != 0)
1222 		return -EIO;
1223 
1224 	psli = &phba->sli;
1225 
1226 	/*
1227 	 * If freeing of the queues has already started, don't access them.
1228 	 * Otherwise set FREE_WAIT to indicate that queues are being used
1229 	 * to hold the freeing process until we finish.
1230 	 */
1231 	spin_lock_irq(&phba->hbalock);
1232 	if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1233 		psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1234 	} else {
1235 		spin_unlock_irq(&phba->hbalock);
1236 		goto skip_wait;
1237 	}
1238 	spin_unlock_irq(&phba->hbalock);
1239 
1240 	/* Wait a little for things to settle down, but not
1241 	 * long enough for dev loss timeout to expire.
1242 	 */
1243 	if (phba->sli_rev != LPFC_SLI_REV4) {
1244 		for (i = 0; i < psli->num_rings; i++) {
1245 			pring = &psli->sli3_ring[i];
1246 			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1247 					      &phba->hbalock))
1248 				goto out;
1249 		}
1250 	} else {
1251 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1252 			pring = qp->pring;
1253 			if (!pring)
1254 				continue;
1255 			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1256 					      &pring->ring_lock))
1257 				goto out;
1258 		}
1259 	}
1260 out:
1261 	spin_lock_irq(&phba->hbalock);
1262 	psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1263 	spin_unlock_irq(&phba->hbalock);
1264 
1265 skip_wait:
1266 	init_completion(&online_compl);
1267 	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1268 	if (rc == 0)
1269 		return -ENOMEM;
1270 
1271 	wait_for_completion(&online_compl);
1272 
1273 	if (status != 0)
1274 		return -EIO;
1275 
1276 	return 0;
1277 }
1278 
1279 /**
1280  * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1281  * @phba: lpfc_hba pointer.
1282  *
1283  * Description:
1284  * Issues a PCI secondary bus reset for the phba->pcidev.
1285  *
1286  * Notes:
1287  * First walks the bus_list to ensure only PCI devices with Emulex
1288  * vendor id, device ids that support hot reset, only one occurrence
1289  * of function 0, and all ports on the bus are in offline mode to ensure the
1290  * hot reset only affects one valid HBA.
1291  *
1292  * Returns:
1293  * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
1294  * -ENODEV,   NULL ptr to pcidev
1295  * -EBADSLT,  detected invalid device
1296  * -EBUSY,    port is not in offline state
1297  *      0,    successful
1298  */
1299 static int
1300 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1301 {
1302 	struct pci_dev *pdev = phba->pcidev;
1303 	struct Scsi_Host *shost = NULL;
1304 	struct lpfc_hba *phba_other = NULL;
1305 	struct pci_dev *ptr = NULL;
1306 	int res;
1307 
1308 	if (phba->cfg_enable_hba_reset != 2)
1309 		return -ENOTSUPP;
1310 
1311 	if (!pdev) {
1312 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1313 		return -ENODEV;
1314 	}
1315 
1316 	res = lpfc_check_pci_resettable(phba);
1317 	if (res)
1318 		return res;
1319 
1320 	/* Walk the list of devices on the pci_dev's bus */
1321 	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1322 		/* Check port is offline */
1323 		shost = pci_get_drvdata(ptr);
1324 		if (shost) {
1325 			phba_other =
1326 				((struct lpfc_vport *)shost->hostdata)->phba;
1327 			if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1328 				lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1329 						"8349 WWPN = 0x%02x%02x%02x%02x"
1330 						"%02x%02x%02x%02x is not "
1331 						"offline!\n",
1332 						phba_other->wwpn[0],
1333 						phba_other->wwpn[1],
1334 						phba_other->wwpn[2],
1335 						phba_other->wwpn[3],
1336 						phba_other->wwpn[4],
1337 						phba_other->wwpn[5],
1338 						phba_other->wwpn[6],
1339 						phba_other->wwpn[7]);
1340 				return -EBUSY;
1341 			}
1342 		}
1343 	}
1344 
1345 	/* Issue PCI bus reset */
1346 	res = pci_reset_bus(pdev);
1347 	if (res) {
1348 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1349 				"8350 PCI reset bus failed: %d\n", res);
1350 	}
1351 
1352 	return res;
1353 }
1354 
1355 /**
1356  * lpfc_selective_reset - Offline then onlines the port
1357  * @phba: lpfc_hba pointer.
1358  *
1359  * Description:
1360  * If the port is configured to allow a reset then the hba is brought
1361  * offline then online.
1362  *
1363  * Notes:
1364  * Assumes any error from lpfc_do_offline() will be negative.
1365  * Do not make this function static.
1366  *
1367  * Returns:
1368  * lpfc_do_offline() return code if not zero
1369  * -EACCES reset not enabled, -ENOMEM or -EIO error bringing the port back online
1370  * zero for success
1371  **/
1372 int
1373 lpfc_selective_reset(struct lpfc_hba *phba)
1374 {
1375 	struct completion online_compl;
1376 	int status = 0;
1377 	int rc;
1378 
1379 	if (!phba->cfg_enable_hba_reset)
1380 		return -EACCES;
1381 
1382 	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1383 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1384 
1385 		if (status != 0)
1386 			return status;
1387 	}
1388 
1389 	init_completion(&online_compl);
1390 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
1391 			      LPFC_EVT_ONLINE);
1392 	if (rc == 0)
1393 		return -ENOMEM;
1394 
1395 	wait_for_completion(&online_compl);
1396 
1397 	if (status != 0)
1398 		return -EIO;
1399 
1400 	return 0;
1401 }
1402 
1403 /**
1404  * lpfc_issue_reset - Selectively resets an adapter
1405  * @dev: class device that is converted into a Scsi_host.
1406  * @attr: device attribute, not used.
1407  * @buf: containing the string "selective".
1408  * @count: unused variable.
1409  *
1410  * Description:
1411  * If the buf contains the string "selective" then lpfc_selective_reset()
1412  * is called to perform the reset.
1413  *
1414  * Notes:
1415  * Assumes any error from lpfc_selective_reset() will be negative.
1416  * If lpfc_selective_reset() returns zero then the length of the buffer
1417  * is returned which indicates success
1418  *
1419  * Returns:
1420  * -EINVAL if the buffer does not contain the string "selective"
1421  * length of buf if the lpfc_selective_reset() call succeeds
1422  * return value of lpfc_selective_reset() if the call fails
1423 **/
1424 static ssize_t
1425 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1426 		 const char *buf, size_t count)
1427 {
1428 	struct Scsi_Host  *shost = class_to_shost(dev);
1429 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1430 	struct lpfc_hba   *phba = vport->phba;
1431 	int status = -EINVAL;
1432 
1433 	if (!phba->cfg_enable_hba_reset)
1434 		return -EACCES;
1435 
1436 	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1437 		status = phba->lpfc_selective_reset(phba);
1438 
1439 	if (status == 0)
1440 		return strlen(buf);
1441 	else
1442 		return status;
1443 }
1444 
1445 /**
1446  * lpfc_sli4_pdev_status_reg_wait - Wait for the pdev status register to report readiness
1447  * @phba: lpfc_hba pointer.
1448  *
1449  * Description:
1450  * Waits on the sliport status register of an SLI4 interface type-2 device
1451  * for readiness after performing a firmware reset.
1452  *
1453  * Returns:
1454  * zero for success, -EPERM when port does not have privilege to perform the
1455  * reset, -EIO when the port times out recovering from the reset.
1456  *
1457  * Note:
1458  * As the caller will interpret the return code by value, be careful in making
1459  * change or addition to return codes.
1460  **/
1461 int
1462 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1463 {
1464 	struct lpfc_register portstat_reg = {0};
1465 	int i;
1466 
1467 	msleep(100);
1468 	if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1469 		       &portstat_reg.word0))
1470 		return -EIO;
1471 
1472 	/* verify if privileged for the request operation */
1473 	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1474 	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
1475 		return -EPERM;
1476 
1477 	/* wait for the SLI port firmware ready after firmware reset */
1478 	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1479 		msleep(10);
1480 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1481 			       &portstat_reg.word0))
1482 			continue;
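		/* After a firmware reset the port is ready once ERR, RN and RDY are all set */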
1483 		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1484 			continue;
1485 		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1486 			continue;
1487 		if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1488 			continue;
1489 		break;
1490 	}
1491 
1492 	if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1493 		return 0;
1494 	else
1495 		return -EIO;
1496 }
1497 
1498 /**
1499  * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
1500  * @phba: lpfc_hba pointer.
1501  * @opcode: The sli4 config command opcode.
1502  *
1503  * Description:
1504  * Request SLI4 interface type-2 device to perform a physical register set
1505  * access.
1506  *
1507  * Returns:
1508  * zero for success
1509  **/
1510 static ssize_t
1511 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1512 {
1513 	struct completion online_compl;
1514 	struct pci_dev *pdev = phba->pcidev;
1515 	uint32_t before_fc_flag;
1516 	uint32_t sriov_nr_virtfn;
1517 	uint32_t reg_val;
1518 	int status = 0, rc = 0;
1519 	int job_posted = 1, sriov_err;
1520 
1521 	if (!phba->cfg_enable_hba_reset)
1522 		return -EACCES;
1523 
1524 	if ((phba->sli_rev < LPFC_SLI_REV4) ||
1525 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1526 	     LPFC_SLI_INTF_IF_TYPE_2))
1527 		return -EPERM;
1528 
1529 	/* Keep state if we need to restore back */
1530 	before_fc_flag = phba->pport->fc_flag;
1531 	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1532 
1533 	/* Disable SR-IOV virtual functions if enabled */
1534 	if (phba->cfg_sriov_nr_virtfn) {
1535 		pci_disable_sriov(pdev);
1536 		phba->cfg_sriov_nr_virtfn = 0;
1537 	}
1538 
1539 	if (opcode == LPFC_FW_DUMP)
1540 		phba->hba_flag |= HBA_FW_DUMP_OP;
1541 
1542 	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1543 
1544 	if (status != 0) {
1545 		phba->hba_flag &= ~HBA_FW_DUMP_OP;
1546 		return status;
1547 	}
1548 
1549 	/* wait for the device to be quiesced before firmware reset */
1550 	msleep(100);
1551 
1552 	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1553 			LPFC_CTL_PDEV_CTL_OFFSET);
1554 
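	/* Set the control bit for the requested operation: FW dump, FW reset or device reset */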
1555 	if (opcode == LPFC_FW_DUMP)
1556 		reg_val |= LPFC_FW_DUMP_REQUEST;
1557 	else if (opcode == LPFC_FW_RESET)
1558 		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1559 	else if (opcode == LPFC_DV_RESET)
1560 		reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1561 
1562 	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1563 	       LPFC_CTL_PDEV_CTL_OFFSET);
1564 	/* flush */
1565 	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1566 
1567 	/* delay driver action following IF_TYPE_2 reset */
1568 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
1569 
1570 	if (rc == -EPERM) {
1571 		/* no privilege for reset */
1572 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1573 				"3150 No privilege to perform the requested "
1574 				"access: x%x\n", reg_val);
1575 	} else if (rc == -EIO) {
1576 		/* reset failed, there is nothing more we can do */
1577 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1578 				"3153 Fail to perform the requested "
1579 				"access: x%x\n", reg_val);
1580 		return rc;
1581 	}
1582 
1583 	/* keep the original port state */
1584 	if (before_fc_flag & FC_OFFLINE_MODE)
1585 		goto out;
1586 
1587 	init_completion(&online_compl);
1588 	job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1589 					   LPFC_EVT_ONLINE);
1590 	if (!job_posted)
1591 		goto out;
1592 
1593 	wait_for_completion(&online_compl);
1594 
1595 out:
1596 	/* in any case, restore the virtual functions enabled as before */
1597 	if (sriov_nr_virtfn) {
1598 		sriov_err =
1599 			lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1600 		if (!sriov_err)
1601 			phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1602 	}
1603 
1604 	/* return proper error code */
1605 	if (!rc) {
1606 		if (!job_posted)
1607 			rc = -ENOMEM;
1608 		else if (status)
1609 			rc = -EIO;
1610 	}
1611 	return rc;
1612 }
1613 
1614 /**
1615  * lpfc_nport_evt_cnt_show - Return the number of nport events
1616  * @dev: class device that is converted into a Scsi_host.
1617  * @attr: device attribute, not used.
1618  * @buf: on return contains the ascii number of nport events.
1619  *
1620  * Returns: size of formatted string.
1621  **/
1622 static ssize_t
1623 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1624 			char *buf)
1625 {
1626 	struct Scsi_Host  *shost = class_to_shost(dev);
1627 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1628 	struct lpfc_hba   *phba = vport->phba;
1629 
1630 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1631 }
1632 
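/**
 * lpfc_set_trunking - Parse a "trunk" subcommand and set the FC trunk mode
 * @phba: pointer to the adapter structure.
 * @buff_out: remainder of the sysfs buffer after the leading "trunk " token,
 *            expected to be "enable <2|4>" or "disable".
 *
 * Issues the FCoE SET_TRUNK_MODE mailbox command with 0x0 (disable),
 * 0x1 (two port trunk) or 0x2 (four port trunk).
 *
 * Returns: 0 on success or when the mailbox command fails (the failure is
 * only logged), -EINVAL on a malformed string or unsupported port count,
 * -ENOMEM if no mailbox memory is available, or the kstrtoul() error code
 * if the port count cannot be parsed.
 **/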
1633 static int
1634 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1635 {
1636 	LPFC_MBOXQ_t *mbox = NULL;
1637 	unsigned long val = 0;
1638 	char *pval = NULL;
1639 	int rc = 0;
1640 
1641 	if (!strncmp("enable", buff_out,
1642 				 strlen("enable"))) {
1643 		pval = buff_out + strlen("enable") + 1;
1644 		rc = kstrtoul(pval, 0, &val);
1645 		if (rc)
1646 			return rc; /* Invalid  number */
1647 	} else if (!strncmp("disable", buff_out,
1648 				 strlen("disable"))) {
1649 		val = 0;
1650 	} else {
1651 		return -EINVAL;  /* Invalid command */
1652 	}
1653 
1654 	switch (val) {
1655 	case 0:
1656 		val = 0x0; /* Disable */
1657 		break;
1658 	case 2:
1659 		val = 0x1; /* Enable two port trunk */
1660 		break;
1661 	case 4:
1662 		val = 0x2; /* Enable four port trunk */
1663 		break;
1664 	default:
1665 		return -EINVAL;
1666 	}
1667 
1668 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1669 			"0070 Set trunk mode with val %ld ", val);
1670 
1671 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1672 	if (!mbox)
1673 		return -ENOMEM;
1674 
1675 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1676 			 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1677 			 12, LPFC_SLI4_MBX_EMBED);
1678 
1679 	bf_set(lpfc_mbx_set_trunk_mode,
1680 	       &mbox->u.mqe.un.set_trunk_mode,
1681 	       val);
1682 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1683 	if (rc)
1684 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1685 				"0071 Set trunk mode failed with status: %d",
1686 				rc);
1687 	mempool_free(mbox, phba->mbox_mem_pool);
1688 
1689 	return 0;
1690 }
1691 
1692 /**
1693  * lpfc_board_mode_show - Return the state of the board
1694  * @dev: class device that is converted into a Scsi_host.
1695  * @attr: device attribute, not used.
1696  * @buf: on return contains the state of the adapter.
1697  *
1698  * Returns: size of formatted string.
1699  **/
1700 static ssize_t
1701 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1702 		     char *buf)
1703 {
1704 	struct Scsi_Host  *shost = class_to_shost(dev);
1705 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1706 	struct lpfc_hba   *phba = vport->phba;
1707 	char  *state;
1708 
1709 	if (phba->link_state == LPFC_HBA_ERROR)
1710 		state = "error";
1711 	else if (phba->link_state == LPFC_WARM_START)
1712 		state = "warm start";
1713 	else if (phba->link_state == LPFC_INIT_START)
1714 		state = "offline";
1715 	else
1716 		state = "online";
1717 
1718 	return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1719 }
1720 
1721 /**
1722  * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
1723  * @dev: class device that is converted into a Scsi_host.
1724  * @attr: device attribute, not used.
1725  * @buf: containing one of the strings "online", "offline", "warm", "error",
 *       "dump", "fw_reset", "dv_reset", "pci_bus_reset", "heartbeat" or "trunk".
1726  * @count: unused variable.
1727  *
1728  * Returns:
1729  * -EACCES if enable hba reset not enabled
1730  * -EINVAL if the buffer does not contain a valid string (see above)
1731  * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
1732  * buf length greater than zero indicates success
1733  **/
1734 static ssize_t
1735 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1736 		      const char *buf, size_t count)
1737 {
1738 	struct Scsi_Host  *shost = class_to_shost(dev);
1739 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1740 	struct lpfc_hba   *phba = vport->phba;
1741 	struct completion online_compl;
1742 	char *board_mode_str = NULL;
1743 	int status = 0;
1744 	int rc;
1745 
1746 	if (!phba->cfg_enable_hba_reset) {
1747 		status = -EACCES;
1748 		goto board_mode_out;
1749 	}
1750 
1751 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1752 			 "3050 lpfc_board_mode set to %s\n", buf);
1753 
1754 	init_completion(&online_compl);
1755 
1756 	if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
1757 		rc = lpfc_workq_post_event(phba, &status, &online_compl,
1758 				      LPFC_EVT_ONLINE);
1759 		if (rc == 0) {
1760 			status = -ENOMEM;
1761 			goto board_mode_out;
1762 		}
1763 		wait_for_completion(&online_compl);
1764 		if (status)
1765 			status = -EIO;
1766 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1767 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1768 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1769 		if (phba->sli_rev == LPFC_SLI_REV4)
1770 			status = -EINVAL;
1771 		else
1772 			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1773 	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1774 		if (phba->sli_rev == LPFC_SLI_REV4)
1775 			status = -EINVAL;
1776 		else
1777 			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1778 	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1779 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1780 	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1781 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1782 	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1783 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1784 	else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1785 		 == 0)
1786 		status = lpfc_reset_pci_bus(phba);
1787 	else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
1788 		lpfc_issue_hb_tmo(phba);
1789 	else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1790 		status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1791 	else
1792 		status = -EINVAL;
1793 
1794 board_mode_out:
1795 	if (!status)
1796 		return strlen(buf);
1797 	else {
1798 		board_mode_str = strchr(buf, '\n');
1799 		if (board_mode_str)
1800 			*board_mode_str = '\0';
1801 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1802 				 "3097 Failed \"%s\", status(%d), "
1803 				 "fc_flag(x%x)\n",
1804 				 buf, status, phba->pport->fc_flag);
1805 		return status;
1806 	}
1807 }
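
/*
 * Example usage (hypothetical host number; scsi_host attributes are
 * typically exposed under /sys/class/scsi_host/):
 *
 *   echo offline          > /sys/class/scsi_host/host0/board_mode
 *   echo online           > /sys/class/scsi_host/host0/board_mode
 *   echo fw_reset         > /sys/class/scsi_host/host0/board_mode
 *   echo "trunk enable 2" > /sys/class/scsi_host/host0/board_mode
 *
 * "warm" and "error" are rejected with -EINVAL on SLI-4 adapters.
 */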
1808 
1809 /**
1810  * lpfc_get_hba_info - Return various bits of information about the adapter
1811  * @phba: pointer to the adapter structure.
1812  * @mxri: max xri count.
1813  * @axri: available xri count.
1814  * @mrpi: max rpi count.
1815  * @arpi: available rpi count.
1816  * @mvpi: max vpi count.
1817  * @avpi: available vpi count.
1818  *
1819  * Description:
1820  * If an integer pointer for a count is not null then the value for that
1821  * count is returned.
1822  *
1823  * Returns:
1824  * zero on error
1825  * one for success
1826  **/
1827 static int
1828 lpfc_get_hba_info(struct lpfc_hba *phba,
1829 		  uint32_t *mxri, uint32_t *axri,
1830 		  uint32_t *mrpi, uint32_t *arpi,
1831 		  uint32_t *mvpi, uint32_t *avpi)
1832 {
1833 	struct lpfc_mbx_read_config *rd_config;
1834 	LPFC_MBOXQ_t *pmboxq;
1835 	MAILBOX_t *pmb;
1836 	int rc = 0;
1837 	uint32_t max_vpi;
1838 
1839 	/*
1840 	 * prevent udev from issuing mailbox commands until the port is
1841 	 * configured.
1842 	 */
1843 	if (phba->link_state < LPFC_LINK_DOWN ||
1844 	    !phba->mbox_mem_pool ||
1845 	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
1846 		return 0;
1847 
1848 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1849 		return 0;
1850 
1851 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1852 	if (!pmboxq)
1853 		return 0;
1854 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1855 
1856 	pmb = &pmboxq->u.mb;
1857 	pmb->mbxCommand = MBX_READ_CONFIG;
1858 	pmb->mbxOwner = OWN_HOST;
1859 	pmboxq->ctx_buf = NULL;
1860 
1861 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1862 		rc = MBX_NOT_FINISHED;
1863 	else
1864 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1865 
1866 	if (rc != MBX_SUCCESS) {
1867 		if (rc != MBX_TIMEOUT)
1868 			mempool_free(pmboxq, phba->mbox_mem_pool);
1869 		return 0;
1870 	}
1871 
1872 	if (phba->sli_rev == LPFC_SLI_REV4) {
1873 		rd_config = &pmboxq->u.mqe.un.rd_config;
1874 		if (mrpi)
1875 			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
1876 		if (arpi)
1877 			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
1878 					phba->sli4_hba.max_cfg_param.rpi_used;
1879 		if (mxri)
1880 			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
1881 		if (axri)
1882 			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
1883 					phba->sli4_hba.max_cfg_param.xri_used;
1884 
1885 		/* Account for differences with SLI-3.  Get vpi count from
1886 		 * mailbox data and subtract one for max vpi value.
1887 		 */
1888 		max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
1889 			(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
1890 
1891 		/* Limit the max we support */
1892 		if (max_vpi > LPFC_MAX_VPI)
1893 			max_vpi = LPFC_MAX_VPI;
1894 		if (mvpi)
1895 			*mvpi = max_vpi;
1896 		if (avpi)
1897 			*avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
1898 	} else {
1899 		if (mrpi)
1900 			*mrpi = pmb->un.varRdConfig.max_rpi;
1901 		if (arpi)
1902 			*arpi = pmb->un.varRdConfig.avail_rpi;
1903 		if (mxri)
1904 			*mxri = pmb->un.varRdConfig.max_xri;
1905 		if (axri)
1906 			*axri = pmb->un.varRdConfig.avail_xri;
1907 		if (mvpi)
1908 			*mvpi = pmb->un.varRdConfig.max_vpi;
1909 		if (avpi) {
1910 			/* avail_vpi is only valid if link is up and ready */
1911 			if (phba->link_state == LPFC_HBA_READY)
1912 				*avpi = pmb->un.varRdConfig.avail_vpi;
1913 			else
1914 				*avpi = pmb->un.varRdConfig.max_vpi;
1915 		}
1916 	}
1917 
1918 	mempool_free(pmboxq, phba->mbox_mem_pool);
1919 	return 1;
1920 }
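
/*
 * Callers pass NULL for any counter they do not need; for example, a
 * minimal query for just the max RPI count (as done by the show
 * functions below) looks like:
 *
 *   uint32_t cnt;
 *
 *   if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
 *           ... use cnt ...
 */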
1921 
1922 /**
1923  * lpfc_max_rpi_show - Return maximum rpi
1924  * @dev: class device that is converted into a Scsi_host.
1925  * @attr: device attribute, not used.
1926  * @buf: on return contains the maximum rpi count in decimal or "Unknown".
1927  *
1928  * Description:
1929  * Calls lpfc_get_hba_info() asking for just the mrpi count.
1930  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1931  * to "Unknown" and the buffer length is returned, therefore the caller
1932  * must check for "Unknown" in the buffer to detect a failure.
1933  *
1934  * Returns: size of formatted string.
1935  **/
1936 static ssize_t
1937 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
1938 		  char *buf)
1939 {
1940 	struct Scsi_Host  *shost = class_to_shost(dev);
1941 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1942 	struct lpfc_hba   *phba = vport->phba;
1943 	uint32_t cnt;
1944 
1945 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
1946 		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
1947 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1948 }
1949 
1950 /**
1951  * lpfc_used_rpi_show - Return maximum rpi minus available rpi
1952  * @dev: class device that is converted into a Scsi_host.
1953  * @attr: device attribute, not used.
1954  * @buf: containing the used rpi count in decimal or "Unknown".
1955  *
1956  * Description:
1957  * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
1958  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1959  * to "Unknown" and the buffer length is returned, therefore the caller
1960  * must check for "Unknown" in the buffer to detect a failure.
1961  *
1962  * Returns: size of formatted string.
1963  **/
1964 static ssize_t
1965 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
1966 		   char *buf)
1967 {
1968 	struct Scsi_Host  *shost = class_to_shost(dev);
1969 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1970 	struct lpfc_hba   *phba = vport->phba;
1971 	uint32_t cnt, acnt;
1972 
1973 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
1974 		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
1975 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1976 }
1977 
1978 /**
1979  * lpfc_max_xri_show - Return maximum xri
1980  * @dev: class device that is converted into a Scsi_host.
1981  * @attr: device attribute, not used.
1982  * @buf: on return contains the maximum xri count in decimal or "Unknown".
1983  *
1984  * Description:
1985  * Calls lpfc_get_hba_info() asking for just the mxri count.
1986  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1987  * to "Unknown" and the buffer length is returned, therefore the caller
1988  * must check for "Unknown" in the buffer to detect a failure.
1989  *
1990  * Returns: size of formatted string.
1991  **/
1992 static ssize_t
1993 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
1994 		  char *buf)
1995 {
1996 	struct Scsi_Host  *shost = class_to_shost(dev);
1997 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1998 	struct lpfc_hba   *phba = vport->phba;
1999 	uint32_t cnt;
2000 
2001 	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2002 		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2003 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2004 }
2005 
2006 /**
2007  * lpfc_used_xri_show - Return maximum xri minus the available xri
2008  * @dev: class device that is converted into a Scsi_host.
2009  * @attr: device attribute, not used.
2010  * @buf: on return contains the used xri count in decimal or "Unknown".
2011  *
2012  * Description:
2013  * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
2014  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2015  * to "Unknown" and the buffer length is returned, therefore the caller
2016  * must check for "Unknown" in the buffer to detect a failure.
2017  *
2018  * Returns: size of formatted string.
2019  **/
2020 static ssize_t
2021 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2022 		   char *buf)
2023 {
2024 	struct Scsi_Host  *shost = class_to_shost(dev);
2025 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2026 	struct lpfc_hba   *phba = vport->phba;
2027 	uint32_t cnt, acnt;
2028 
2029 	if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2030 		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2031 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2032 }
2033 
2034 /**
2035  * lpfc_max_vpi_show - Return maximum vpi
2036  * @dev: class device that is converted into a Scsi_host.
2037  * @attr: device attribute, not used.
2038  * @buf: on return contains the maximum vpi count in decimal or "Unknown".
2039  *
2040  * Description:
2041  * Calls lpfc_get_hba_info() asking for just the mvpi count.
2042  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2043  * to "Unknown" and the buffer length is returned, therefore the caller
2044  * must check for "Unknown" in the buffer to detect a failure.
2045  *
2046  * Returns: size of formatted string.
2047  **/
2048 static ssize_t
2049 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2050 		  char *buf)
2051 {
2052 	struct Scsi_Host  *shost = class_to_shost(dev);
2053 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2054 	struct lpfc_hba   *phba = vport->phba;
2055 	uint32_t cnt;
2056 
2057 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2058 		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2059 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2060 }
2061 
2062 /**
2063  * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
2064  * @dev: class device that is converted into a Scsi_host.
2065  * @attr: device attribute, not used.
2066  * @buf: on return contains the used vpi count in decimal or "Unknown".
2067  *
2068  * Description:
2069  * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
2070  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2071  * to "Unknown" and the buffer length is returned, therefore the caller
2072  * must check for "Unknown" in the buffer to detect a failure.
2073  *
2074  * Returns: size of formatted string.
2075  **/
2076 static ssize_t
2077 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2078 		   char *buf)
2079 {
2080 	struct Scsi_Host  *shost = class_to_shost(dev);
2081 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2082 	struct lpfc_hba   *phba = vport->phba;
2083 	uint32_t cnt, acnt;
2084 
2085 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2086 		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2087 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2088 }
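
/*
 * The six resource counters above are read-only sysfs attributes,
 * exposed as max_rpi, used_rpi, max_xri, used_xri, max_vpi and used_vpi
 * (see the DEVICE_ATTR declarations further down).  A hypothetical read
 * such as
 *
 *   cat /sys/class/scsi_host/host0/max_rpi
 *
 * prints the decimal count, or the literal string "Unknown" when the
 * READ_CONFIG mailbox query in lpfc_get_hba_info() fails.
 */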
2089 
2090 /**
2091  * lpfc_npiv_info_show - Return text about NPIV support for the adapter
2092  * @dev: class device that is converted into a Scsi_host.
2093  * @attr: device attribute, not used.
2094  * @buf: text that must be interpreted to determine if npiv is supported.
2095  *
2096  * Description:
2097  * Buffer will contain text indicating NPIV is not supported on the port,
2098  * the port is an NPIV physical port, or it is an NPIV virtual port with
2099  * the id of the vport.
2100  *
2101  * Returns: size of formatted string.
2102  **/
2103 static ssize_t
2104 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2105 		    char *buf)
2106 {
2107 	struct Scsi_Host  *shost = class_to_shost(dev);
2108 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2109 	struct lpfc_hba   *phba = vport->phba;
2110 
2111 	if (!(phba->max_vpi))
2112 		return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2113 	if (vport->port_type == LPFC_PHYSICAL_PORT)
2114 		return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2115 	return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2116 }
2117 
2118 /**
2119  * lpfc_poll_show - Return text about poll support for the adapter
2120  * @dev: class device that is converted into a Scsi_host.
2121  * @attr: device attribute, not used.
2122  * @buf: on return contains the cfg_poll in hex.
2123  *
2124  * Notes:
2125  * cfg_poll should be a lpfc_polling_flags type.
2126  *
2127  * Returns: size of formatted string.
2128  **/
2129 static ssize_t
2130 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2131 	       char *buf)
2132 {
2133 	struct Scsi_Host  *shost = class_to_shost(dev);
2134 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2135 	struct lpfc_hba   *phba = vport->phba;
2136 
2137 	return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2138 }
2139 
2140 /**
2141  * lpfc_poll_store - Set the value of cfg_poll for the adapter
2142  * @dev: class device that is converted into a Scsi_host.
2143  * @attr: device attribute, not used.
2144  * @buf: one or more lpfc_polling_flags values.
2145  * @count: not used.
2146  *
2147  * Notes:
2148  * buf contents converted to integer and checked for a valid value.
2149  *
2150  * Returns:
2151  * -EINVAL if the buffer cannot be converted or is out of range
2152  * length of the buf on success
2153  **/
2154 static ssize_t
2155 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2156 		const char *buf, size_t count)
2157 {
2158 	struct Scsi_Host  *shost = class_to_shost(dev);
2159 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2160 	struct lpfc_hba   *phba = vport->phba;
2161 	uint32_t creg_val;
2162 	uint32_t old_val;
2163 	int val = 0;
2164 
2165 	if (!isdigit(buf[0]))
2166 		return -EINVAL;
2167 
2168 	if (sscanf(buf, "%i", &val) != 1)
2169 		return -EINVAL;
2170 
2171 	if ((val & 0x3) != val)
2172 		return -EINVAL;
2173 
2174 	if (phba->sli_rev == LPFC_SLI_REV4)
2175 		val = 0;
2176 
2177 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2178 		"3051 lpfc_poll changed from %d to %d\n",
2179 		phba->cfg_poll, val);
2180 
2181 	spin_lock_irq(&phba->hbalock);
2182 
2183 	old_val = phba->cfg_poll;
2184 
2185 	if (val & ENABLE_FCP_RING_POLLING) {
2186 		if ((val & DISABLE_FCP_RING_INT) &&
2187 		    !(old_val & DISABLE_FCP_RING_INT)) {
2188 			if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2189 				spin_unlock_irq(&phba->hbalock);
2190 				return -EINVAL;
2191 			}
2192 			creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2193 			writel(creg_val, phba->HCregaddr);
2194 			readl(phba->HCregaddr); /* flush */
2195 
2196 			lpfc_poll_start_timer(phba);
2197 		}
2198 	} else if (val != 0x0) {
2199 		spin_unlock_irq(&phba->hbalock);
2200 		return -EINVAL;
2201 	}
2202 
2203 	if (!(val & DISABLE_FCP_RING_INT) &&
2204 	    (old_val & DISABLE_FCP_RING_INT)) {
2206 		spin_unlock_irq(&phba->hbalock);
2207 		del_timer(&phba->fcp_poll_timer);
2208 		spin_lock_irq(&phba->hbalock);
2209 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2210 			spin_unlock_irq(&phba->hbalock);
2211 			return -EINVAL;
2212 		}
2213 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2214 		writel(creg_val, phba->HCregaddr);
2215 		readl(phba->HCregaddr); /* flush */
2216 	}
2217 
2218 	phba->cfg_poll = val;
2219 
2220 	spin_unlock_irq(&phba->hbalock);
2221 
2222 	return strlen(buf);
2223 }
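
/*
 * A sketch of expected input through the corresponding sysfs attribute
 * (typically exposed as lpfc_poll), assuming the usual flag values of
 * ENABLE_FCP_RING_POLLING = 0x1 and DISABLE_FCP_RING_INT = 0x2:
 *
 *   echo 0x0 > /sys/class/scsi_host/host0/lpfc_poll   # interrupts only
 *   echo 0x1 > /sys/class/scsi_host/host0/lpfc_poll   # poll the FCP ring
 *   echo 0x3 > /sys/class/scsi_host/host0/lpfc_poll   # poll, ring ints off
 *
 * Any value with bits set outside 0x3 is rejected, and SLI-4 adapters
 * force the value back to 0.
 */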
2224 
2225 /**
2226  * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
2227  * @dev: class converted to a Scsi_host structure.
2228  * @attr: device attribute, not used.
2229  * @buf: on return contains the formatted support level.
2230  *
2231  * Description:
2232  * Returns the maximum number of virtual functions a physical function can
2233  * support, 0 will be returned if called on a virtual function.
2234  *
2235  * Returns: size of formatted string.
2236  **/
2237 static ssize_t
2238 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2239 			      struct device_attribute *attr,
2240 			      char *buf)
2241 {
2242 	struct Scsi_Host *shost = class_to_shost(dev);
2243 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2244 	struct lpfc_hba *phba = vport->phba;
2245 	uint16_t max_nr_virtfn;
2246 
2247 	max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2248 	return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2249 }
2250 
2251 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
2252 {
2253 	return val >= min && val <= max;
2254 }
2255 
2256 /**
2257  * lpfc_enable_bbcr_set - Set the lpfc_enable_bbcr attribute value
2258  * @phba: pointer to the adapter structure.
2259  * @val: integer attribute value.
2260  *
2261  * Description:
2262  * Validates the min and max values then sets the
2263  * adapter config field if in the valid range. Prints an error message
2264  * and does not set the parameter if invalid.
2265  *
2266  * Returns:
2267  * zero on success
2268  * -EINVAL if val is invalid
2269  */
2270 static ssize_t
2271 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2272 {
2273 	if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2274 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2275 				"3068 lpfc_enable_bbcr changed from %d to "
2276 				"%d\n", phba->cfg_enable_bbcr, val);
2277 		phba->cfg_enable_bbcr = val;
2278 		return 0;
2279 	}
2280 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2281 			"0451 lpfc_enable_bbcr cannot set to %d, range is 0, "
2282 			"1\n", val);
2283 	return -EINVAL;
2284 }
2285 
2286 /*
2287  * lpfc_param_show - Return a cfg attribute value in decimal
2288  *
2289  * Description:
2290  * Macro that given an attr e.g. hba_queue_depth expands
2291  * into a function with the name lpfc_hba_queue_depth_show.
2292  *
2293  * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
2294  * @dev: class device that is converted into a Scsi_host.
2295  * @attr: device attribute, not used.
2296  * @buf: on return contains the attribute value in decimal.
2297  *
2298  * Returns: size of formatted string.
2299  **/
2300 #define lpfc_param_show(attr)	\
2301 static ssize_t \
2302 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2303 		   char *buf) \
2304 { \
2305 	struct Scsi_Host  *shost = class_to_shost(dev);\
2306 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2307 	struct lpfc_hba   *phba = vport->phba;\
2308 	return scnprintf(buf, PAGE_SIZE, "%d\n",\
2309 			phba->cfg_##attr);\
2310 }
2311 
2312 /*
2313  * lpfc_param_hex_show - Return a cfg attribute value in hex
2314  *
2315  * Description:
2316  * Macro that given an attr e.g. hba_queue_depth expands
2317  * into a function with the name lpfc_hba_queue_depth_show
2318  *
2319  * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
2320  * @dev: class device that is converted into a Scsi_host.
2321  * @attr: device attribute, not used.
2322  * @buf: on return contains the attribute value in hexadecimal.
2323  *
2324  * Returns: size of formatted string.
2325  **/
2326 #define lpfc_param_hex_show(attr)	\
2327 static ssize_t \
2328 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2329 		   char *buf) \
2330 { \
2331 	struct Scsi_Host  *shost = class_to_shost(dev);\
2332 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2333 	struct lpfc_hba   *phba = vport->phba;\
2336 	return scnprintf(buf, PAGE_SIZE, "%#x\n",\
2337 			phba->cfg_##attr);\
2338 }
2339 
2340 /*
2341  * lpfc_param_init - Initializes a cfg attribute
2342  *
2343  * Description:
2344  * Macro that given an attr e.g. hba_queue_depth expands
2345  * into a function with the name lpfc_hba_queue_depth_init. The macro also
2346  * takes a default argument, a minimum and maximum argument.
2347  *
2348  * lpfc_##attr##_init: Initializes an attribute.
2349  * @phba: pointer to the adapter structure.
2350  * @val: integer attribute value.
2351  *
2352  * Validates the min and max values then sets the adapter config field
2353  * accordingly, or uses the default if out of range and prints an error message.
2354  *
2355  * Returns:
2356  * zero on success
2357  * -EINVAL if default used
2358  **/
2359 #define lpfc_param_init(attr, default, minval, maxval)	\
2360 static int \
2361 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2362 { \
2363 	if (lpfc_rangecheck(val, minval, maxval)) {\
2364 		phba->cfg_##attr = val;\
2365 		return 0;\
2366 	}\
2367 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2368 			"0449 lpfc_"#attr" attribute cannot be set to %d, "\
2369 			"allowed range is ["#minval", "#maxval"]\n", val); \
2370 	phba->cfg_##attr = default;\
2371 	return -EINVAL;\
2372 }
2373 
2374 /*
2375  * lpfc_param_set - Set a cfg attribute value
2376  *
2377  * Description:
2378  * Macro that given an attr e.g. hba_queue_depth expands
2379  * into a function with the name lpfc_hba_queue_depth_set
2380  *
2381  * lpfc_##attr##_set: Sets an attribute value.
2382  * @phba: pointer to the adapter structure.
2383  * @val: integer attribute value.
2384  *
2385  * Description:
2386  * Validates the min and max values then sets the
2387  * adapter config field if in the valid range. Prints an error message
2388  * and does not set the parameter if invalid.
2389  *
2390  * Returns:
2391  * zero on success
2392  * -EINVAL if val is invalid
2393  **/
2394 #define lpfc_param_set(attr, default, minval, maxval)	\
2395 static int \
2396 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2397 { \
2398 	if (lpfc_rangecheck(val, minval, maxval)) {\
2399 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2400 			"3052 lpfc_" #attr " changed from %d to %d\n", \
2401 			phba->cfg_##attr, val); \
2402 		phba->cfg_##attr = val;\
2403 		return 0;\
2404 	}\
2405 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2406 			"0450 lpfc_"#attr" attribute cannot be set to %d, "\
2407 			"allowed range is ["#minval", "#maxval"]\n", val); \
2408 	return -EINVAL;\
2409 }
2410 
2411 /*
2412  * lpfc_param_store - Set a vport attribute value
2413  *
2414  * Description:
2415  * Macro that given an attr e.g. hba_queue_depth expands
2416  * into a function with the name lpfc_hba_queue_depth_store.
2417  *
2418  * lpfc_##attr##_store: Set an attribute value.
2419  * @dev: class device that is converted into a Scsi_host.
2420  * @attr: device attribute, not used.
2421  * @buf: contains the attribute value in ascii.
2422  * @count: not used.
2423  *
2424  * Description:
2425  * Convert the ascii text number to an integer, then
2426  * use the lpfc_##attr##_set function to set the value.
2427  *
2428  * Returns:
2429  * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2430  * length of buffer upon success.
2431  **/
2432 #define lpfc_param_store(attr)	\
2433 static ssize_t \
2434 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2435 		    const char *buf, size_t count) \
2436 { \
2437 	struct Scsi_Host  *shost = class_to_shost(dev);\
2438 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2439 	struct lpfc_hba   *phba = vport->phba;\
2440 	uint val = 0;\
2441 	if (!isdigit(buf[0]))\
2442 		return -EINVAL;\
2443 	if (sscanf(buf, "%i", &val) != 1)\
2444 		return -EINVAL;\
2445 	if (lpfc_##attr##_set(phba, val) == 0) \
2446 		return strlen(buf);\
2447 	else \
2448 		return -EINVAL;\
2449 }
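
/*
 * Taken together, the macros above generate the four standard handlers
 * for a driver-wide parameter.  As a sketch, for a hypothetical
 * cfg_foo_depth field the invocations
 *
 *   lpfc_param_show(foo_depth)
 *   lpfc_param_init(foo_depth, 32, 1, 128)
 *   lpfc_param_set(foo_depth, 32, 1, 128)
 *   lpfc_param_store(foo_depth)
 *
 * would define lpfc_foo_depth_show(), lpfc_foo_depth_init(),
 * lpfc_foo_depth_set() and lpfc_foo_depth_store(), falling back to the
 * default of 32 at init time and rejecting out-of-range writes with
 * -EINVAL.
 */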
2450 
2451 /*
2452  * lpfc_vport_param_show - Return decimal formatted cfg attribute value
2453  *
2454  * Description:
2455  * Macro that given an attr e.g. hba_queue_depth expands
2456  * into a function with the name lpfc_hba_queue_depth_show
2457  *
2458  * lpfc_##attr##_show: prints the attribute value in decimal.
2459  * @dev: class device that is converted into a Scsi_host.
2460  * @attr: device attribute, not used.
2461  * @buf: on return contains the attribute value in decimal.
2462  *
2463  * Returns: length of formatted string.
2464  **/
2465 #define lpfc_vport_param_show(attr)	\
2466 static ssize_t \
2467 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2468 		   char *buf) \
2469 { \
2470 	struct Scsi_Host  *shost = class_to_shost(dev);\
2471 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2472 	return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2473 }
2474 
2475 /*
2476  * lpfc_vport_param_hex_show - Return hex formatted attribute value
2477  *
2478  * Description:
2479  * Macro that given an attr e.g.
2480  * hba_queue_depth expands into a function with the name
2481  * lpfc_hba_queue_depth_show
2482  *
2483  * lpfc_##attr##_show: prints the attribute value in hexadecimal.
2484  * @dev: class device that is converted into a Scsi_host.
2485  * @attr: device attribute, not used.
2486  * @buf: on return contains the attribute value in hexadecimal.
2487  *
2488  * Returns: length of formatted string.
2489  **/
2490 #define lpfc_vport_param_hex_show(attr)	\
2491 static ssize_t \
2492 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2493 		   char *buf) \
2494 { \
2495 	struct Scsi_Host  *shost = class_to_shost(dev);\
2496 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2497 	return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2498 }
2499 
2500 /*
2501  * lpfc_vport_param_init - Initialize a vport cfg attribute
2502  *
2503  * Description:
2504  * Macro that given an attr e.g. hba_queue_depth expands
2505  * into a function with the name lpfc_hba_queue_depth_init. The macro also
2506  * takes a default argument, a minimum and maximum argument.
2507  *
2508  * lpfc_##attr##_init: validates the min and max values then sets the
2509  * adapter config field accordingly, or uses the default if out of range
2510  * and prints an error message.
2511  * @vport: pointer to the vport structure.
2512  * @val: integer attribute value.
2513  *
2514  * Returns:
2515  * zero on success
2516  * -EINVAL if default used
2517  **/
2518 #define lpfc_vport_param_init(attr, default, minval, maxval)	\
2519 static int \
2520 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2521 { \
2522 	if (lpfc_rangecheck(val, minval, maxval)) {\
2523 		vport->cfg_##attr = val;\
2524 		return 0;\
2525 	}\
2526 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2527 			 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2528 			 "allowed range is ["#minval", "#maxval"]\n", val); \
2529 	vport->cfg_##attr = default;\
2530 	return -EINVAL;\
2531 }
2532 
2533 /*
2534  * lpfc_vport_param_set - Set a vport cfg attribute
2535  *
2536  * Description:
2537  * Macro that given an attr e.g. hba_queue_depth expands
2538  * into a function with the name lpfc_hba_queue_depth_set
2539  *
2540  * lpfc_##attr##_set: validates the min and max values then sets the
2541  * adapter config field if in the valid range. Prints an error message
2542  * and does not set the parameter if invalid.
2543  * @vport: pointer to the vport structure.
2544  * @val:	integer attribute value.
2545  *
2546  * Returns:
2547  * zero on success
2548  * -EINVAL if val is invalid
2549  **/
2550 #define lpfc_vport_param_set(attr, default, minval, maxval)	\
2551 static int \
2552 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2553 { \
2554 	if (lpfc_rangecheck(val, minval, maxval)) {\
2555 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2556 			"3053 lpfc_" #attr \
2557 			" changed from %d (x%x) to %d (x%x)\n", \
2558 			vport->cfg_##attr, vport->cfg_##attr, \
2559 			val, val); \
2560 		vport->cfg_##attr = val;\
2561 		return 0;\
2562 	}\
2563 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2564 			 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2565 			 "allowed range is ["#minval", "#maxval"]\n", val); \
2566 	return -EINVAL;\
2567 }
2568 
2569 /*
2570  * lpfc_vport_param_store - Set a vport attribute
2571  *
2572  * Description:
2573  * Macro that given an attr e.g. hba_queue_depth
2574  * expands into a function with the name lpfc_hba_queue_depth_store
2575  *
2576  * lpfc_##attr##_store: convert the ascii text number to an integer, then
2577  * use the lpfc_##attr##_set function to set the value.
2578  * @dev: class device that is converted into a Scsi_host.
2579  * @buf:	contains the attribute value in decimal.
2580  * @count: not used.
2581  *
2582  * Returns:
2583  * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2584  * length of buffer upon success.
2585  **/
2586 #define lpfc_vport_param_store(attr)	\
2587 static ssize_t \
2588 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2589 		    const char *buf, size_t count) \
2590 { \
2591 	struct Scsi_Host  *shost = class_to_shost(dev);\
2592 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2593 	uint val = 0;\
2594 	if (!isdigit(buf[0]))\
2595 		return -EINVAL;\
2596 	if (sscanf(buf, "%i", &val) != 1)\
2597 		return -EINVAL;\
2598 	if (lpfc_##attr##_set(vport, val) == 0) \
2599 		return strlen(buf);\
2600 	else \
2601 		return -EINVAL;\
2602 }
2603 
2604 
2605 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2606 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2607 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2608 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2609 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2610 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2611 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2612 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2613 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2614 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2615 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2616 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2617 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2618 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2619 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2620 		lpfc_link_state_store);
2621 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2622 		   lpfc_option_rom_version_show, NULL);
2623 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2624 		   lpfc_num_discovered_ports_show, NULL);
2625 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2626 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2627 static DEVICE_ATTR_RO(lpfc_drvr_version);
2628 static DEVICE_ATTR_RO(lpfc_enable_fip);
2629 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2630 		   lpfc_board_mode_show, lpfc_board_mode_store);
2631 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2632 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2633 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2634 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2635 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2636 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2637 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2638 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2639 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2640 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2641 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2642 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2643 		   NULL);
2644 
2645 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2646 #define WWN_SZ 8
2647 /**
2648  * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2649  * @buf: WWN string.
2650  * @cnt: Length of string.
2651  * @wwn: Array to receive converted wwn value.
2652  *
2653  * Returns:
2654  * -EINVAL if the buffer does not contain a valid wwn
2655  * 0 on success
2656  **/
2657 static int
2658 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2659 {
2660 	unsigned int i, j;
2661 
2662 	/* Count may include a LF at end of string */
2663 	if (buf[cnt-1] == '\n')
2664 		cnt--;
2665 
2666 	if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2667 	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2668 		return -EINVAL;
2669 
2670 	memset(wwn, 0, WWN_SZ);
2671 
2672 	/* Validate and store the new name */
2673 	for (i = 0, j = 0; i < 16; i++) {
2674 		if ((*buf >= 'a') && (*buf <= 'f'))
2675 			j = ((j << 4) | ((*buf++ - 'a') + 10));
2676 		else if ((*buf >= 'A') && (*buf <= 'F'))
2677 			j = ((j << 4) | ((*buf++ - 'A') + 10));
2678 		else if ((*buf >= '0') && (*buf <= '9'))
2679 			j = ((j << 4) | (*buf++ - '0'));
2680 		else
2681 			return -EINVAL;
2682 		if (i % 2) {
2683 			wwn[i/2] = j & 0xff;
2684 			j = 0;
2685 		}
2686 	}
2687 	return 0;
2688 }
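
/*
 * Accepted input is 16 hex digits, optionally prefixed with "x" or "0x"
 * and optionally terminated by a newline, e.g. (hypothetical WWPN):
 *
 *   10000000c9abcdef
 *   0x10000000c9abcdef
 */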
2689 /**
2690  * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2691  * @dev: class device that is converted into a Scsi_host.
2692  * @attr: device attribute, not used.
2693  * @buf: containing the string lpfc_soft_wwn_key.
2694  * @count: must be size of lpfc_soft_wwn_key.
2695  *
2696  * Returns:
2697  * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
2698  * length of buf indicates success
2699  **/
2700 static ssize_t
2701 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2702 			   const char *buf, size_t count)
2703 {
2704 	struct Scsi_Host  *shost = class_to_shost(dev);
2705 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2706 	struct lpfc_hba   *phba = vport->phba;
2707 	unsigned int cnt = count;
2708 	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2709 	u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2710 
2711 	/*
2712 	 * We're doing a simple sanity check for soft_wwpn setting.
2713 	 * We require that the user write a specific key to enable
2714 	 * the soft_wwpn attribute to be settable. Once the attribute
2715 	 * is written, the enable key resets. If further updates are
2716 	 * desired, the key must be written again to re-enable the
2717 	 * attribute.
2718 	 *
2719 	 * The "key" is not secret - it is a hardcoded string shown
2720 	 * here. The intent is to protect against the random user or
2721 	 * application that is just writing attributes.
2722 	 */
2723 	if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2724 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2725 				"0051 lpfc soft wwpn can not be enabled: "
2726 				"fawwpn is enabled\n");
2727 		return -EINVAL;
2728 	}
2729 
2730 	/* count may include a LF at end of string */
2731 	if (buf[cnt-1] == '\n')
2732 		cnt--;
2733 
2734 	if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2735 	    (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2736 		return -EINVAL;
2737 
2738 	phba->soft_wwn_enable = 1;
2739 
2740 	dev_printk(KERN_WARNING, &phba->pcidev->dev,
2741 		   "lpfc%d: soft_wwpn assignment has been enabled.\n",
2742 		   phba->brd_no);
2743 	dev_printk(KERN_WARNING, &phba->pcidev->dev,
2744 		   "  The soft_wwpn feature is not supported by Broadcom.");
2745 
2746 	return count;
2747 }
2748 static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
2749 
2750 /**
2751  * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
2752  * @dev: class device that is converted into a Scsi_host.
2753  * @attr: device attribute, not used.
2754  * @buf: on return contains the wwpn in hexadecimal.
2755  *
2756  * Returns: size of formatted string.
2757  **/
2758 static ssize_t
2759 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2760 		    char *buf)
2761 {
2762 	struct Scsi_Host  *shost = class_to_shost(dev);
2763 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2764 	struct lpfc_hba   *phba = vport->phba;
2765 
2766 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2767 			(unsigned long long)phba->cfg_soft_wwpn);
2768 }
2769 
2770 /**
2771  * lpfc_soft_wwpn_store - Set the ww port name of the adapter
2772  * @dev: class device that is converted into a Scsi_host.
2773  * @attr: device attribute, not used.
2774  * @buf: contains the wwpn in hexadecimal.
2775  * @count: number of wwpn bytes in buf
2776  *
2777  * Returns:
2778  * -EACCES hba reset not enabled, adapter over temp
2779  * -EINVAL soft wwn not enabled, count is invalid, or buf contains an invalid wwpn
2780  * -EIO error taking adapter offline or online
2781  * value of count on success
2782  **/
2783 static ssize_t
2784 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2785 		     const char *buf, size_t count)
2786 {
2787 	struct Scsi_Host  *shost = class_to_shost(dev);
2788 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2789 	struct lpfc_hba   *phba = vport->phba;
2790 	struct completion online_compl;
2791 	int stat1 = 0, stat2 = 0;
2792 	unsigned int cnt = count;
2793 	u8 wwpn[WWN_SZ];
2794 	int rc;
2795 
2796 	if (!phba->cfg_enable_hba_reset)
2797 		return -EACCES;
2798 	spin_lock_irq(&phba->hbalock);
2799 	if (phba->over_temp_state == HBA_OVER_TEMP) {
2800 		spin_unlock_irq(&phba->hbalock);
2801 		return -EACCES;
2802 	}
2803 	spin_unlock_irq(&phba->hbalock);
2804 	/* count may include a LF at end of string */
2805 	if (buf[cnt-1] == '\n')
2806 		cnt--;
2807 
2808 	if (!phba->soft_wwn_enable)
2809 		return -EINVAL;
2810 
2811 	/* lock setting wwpn, wwnn down */
2812 	phba->soft_wwn_enable = 0;
2813 
2814 	rc = lpfc_wwn_set(buf, cnt, wwpn);
2815 	if (rc) {
2816 		/* not able to set wwpn, unlock it */
2817 		phba->soft_wwn_enable = 1;
2818 		return rc;
2819 	}
2820 
2821 	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2822 	fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2823 	if (phba->cfg_soft_wwnn)
2824 		fc_host_node_name(shost) = phba->cfg_soft_wwnn;
2825 
2826 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2827 		   "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
2828 
2829 	stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
2830 	if (stat1)
2831 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2832 				"0463 lpfc_soft_wwpn attribute set failed to "
2833 				"reinit adapter - %d\n", stat1);
2834 	init_completion(&online_compl);
2835 	rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
2836 				   LPFC_EVT_ONLINE);
2837 	if (rc == 0)
2838 		return -ENOMEM;
2839 
2840 	wait_for_completion(&online_compl);
2841 	if (stat2)
2842 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2843 				"0464 lpfc_soft_wwpn attribute set failed to "
2844 				"reinit adapter - %d\n", stat2);
2845 	return (stat1 || stat2) ? -EIO : count;
2846 }
2847 static DEVICE_ATTR_RW(lpfc_soft_wwpn);
2848 
2849 /**
2850  * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
2851  * @dev: class device that is converted into a Scsi_host.
2852  * @attr: device attribute, not used.
2853  * @buf: on return contains the wwnn in hexadecimal.
2854  *
2855  * Returns: size of formatted string.
2856  **/
2857 static ssize_t
2858 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
2859 		    char *buf)
2860 {
2861 	struct Scsi_Host *shost = class_to_shost(dev);
2862 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2863 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2864 			(unsigned long long)phba->cfg_soft_wwnn);
2865 }
2866 
2867 /**
2868  * lpfc_soft_wwnn_store - sets the ww node name of the adapter
2869  * @dev: class device that is converted into a Scsi_host.
2870  * @attr: device attribute, not used.
2871  * @buf: contains the ww node name in hexadecimal.
2872  * @count: number of wwnn bytes in buf.
2873  *
2874  * Returns:
2875  * -EINVAL soft wwn not enabled, count is invalid, or buf contains an invalid wwnn
2876  * value of count on success
2877  **/
2878 static ssize_t
2879 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2880 		     const char *buf, size_t count)
2881 {
2882 	struct Scsi_Host *shost = class_to_shost(dev);
2883 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2884 	unsigned int cnt = count;
2885 	u8 wwnn[WWN_SZ];
2886 	int rc;
2887 
2888 	/* count may include a LF at end of string */
2889 	if (buf[cnt-1] == '\n')
2890 		cnt--;
2891 
2892 	if (!phba->soft_wwn_enable)
2893 		return -EINVAL;
2894 
2895 	rc = lpfc_wwn_set(buf, cnt, wwnn);
2896 	if (rc) {
2897 		/* Allow wwnn to be set many times, as long as the enable
2898 		 * is set. However, once the wwpn is set, everything locks.
2899 		 */
2900 		return rc;
2901 	}
2902 
2903 	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2904 
2905 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2906 		   "lpfc%d: soft_wwnn set. Value will take effect upon "
2907 		   "setting of the soft_wwpn\n", phba->brd_no);
2908 
2909 	return count;
2910 }
2911 static DEVICE_ATTR_RW(lpfc_soft_wwnn);
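
/*
 * Typical soft WWN assignment sequence (hypothetical host number and
 * WWNs; writing the wwpn takes the port offline and back online):
 *
 *   echo C99G71SL8032A    > /sys/class/scsi_host/host0/lpfc_soft_wwn_enable
 *   echo 20000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwnn
 *   echo 10000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwpn
 *
 * The enable key is consumed by the wwpn write, so it must be written
 * again before any further soft WWN change.
 */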
2912 
2913 /**
2914  * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
2915  *		      Optimized Access Storage (OAS) operations.
2916  * @dev: class device that is converted into a Scsi_host.
2917  * @attr: device attribute, not used.
2918  * @buf: buffer for passing information.
2919  *
2920  * Returns:
2921  * size of formatted string.
2922  **/
2923 static ssize_t
2924 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2925 		  char *buf)
2926 {
2927 	struct Scsi_Host *shost = class_to_shost(dev);
2928 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2929 
2930 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2931 			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2932 }
2933 
2934 /**
2935  * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
2936  *		      Optimized Access Storage (OAS) operations.
2937  * @dev: class device that is converted into a Scsi_host.
2938  * @attr: device attribute, not used.
2939  * @buf: buffer for passing information.
2940  * @count: Size of the data buffer.
2941  *
2942  * Returns:
2943  * -EINVAL count is invalid or buf contains an invalid wwpn
2944  * -EPERM oas is not supported by hba
2945  * value of count on success
2946  **/
2947 static ssize_t
2948 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2949 		   const char *buf, size_t count)
2950 {
2951 	struct Scsi_Host *shost = class_to_shost(dev);
2952 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2953 	unsigned int cnt = count;
2954 	uint8_t wwpn[WWN_SZ];
2955 	int rc;
2956 
2957 	if (!phba->cfg_fof)
2958 		return -EPERM;
2959 
2960 	/* count may include a LF at end of string */
2961 	if (buf[cnt-1] == '\n')
2962 		cnt--;
2963 
2964 	rc = lpfc_wwn_set(buf, cnt, wwpn);
2965 	if (rc)
2966 		return rc;
2967 
2968 	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2969 	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2970 	if (wwn_to_u64(wwpn) == 0)
2971 		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2972 	else
2973 		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2974 	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2975 	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2976 	return count;
2977 }
2978 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
2979 		   lpfc_oas_tgt_show, lpfc_oas_tgt_store);
2980 
2981 /**
2982  * lpfc_oas_priority_show - Return the priority used for
2983  *		      Optimized Access Storage (OAS) operations.
2984  * @dev: class device that is converted into a Scsi_host.
2985  * @attr: device attribute, not used.
2986  * @buf: buffer for passing information.
2987  *
2988  * Returns:
2989  * size of formatted string.
2990  **/
2991 static ssize_t
2992 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
2993 		       char *buf)
2994 {
2995 	struct Scsi_Host *shost = class_to_shost(dev);
2996 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2997 
2998 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
2999 }
3000 
3001 /**
3002  * lpfc_oas_priority_store - Store the priority to be used for
3003  *		      Optimized Access Storage (OAS) operations.
3004  * @dev: class device that is converted into a Scsi_host.
3005  * @attr: device attribute, not used.
3006  * @buf: buffer for passing information.
3007  * @count: Size of the data buffer.
3008  *
3009  * Returns:
3010  * -EINVAL if buf cannot be converted or the priority is out of range
3011  * -EPERM oas is not supported by hba
3012  * value of count on success
3013  **/
3014 static ssize_t
3015 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3016 			const char *buf, size_t count)
3017 {
3018 	struct Scsi_Host *shost = class_to_shost(dev);
3019 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3020 	unsigned int cnt = count;
3021 	unsigned long val;
3022 	int ret;
3023 
3024 	if (!phba->cfg_fof)
3025 		return -EPERM;
3026 
3027 	/* count may include a LF at end of string */
3028 	if (buf[cnt-1] == '\n')
3029 		cnt--;
3030 
3031 	ret = kstrtoul(buf, 0, &val);
3032 	if (ret || (val > 0x7f))
3033 		return -EINVAL;
3034 
3035 	if (val)
3036 		phba->cfg_oas_priority = (uint8_t)val;
3037 	else
3038 		phba->cfg_oas_priority = phba->cfg_XLanePriority;
3039 	return count;
3040 }
3041 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3042 		   lpfc_oas_priority_show, lpfc_oas_priority_store);
3043 
3044 /**
3045  * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
3046  *		      for Optimized Access Storage (OAS) operations.
3047  * @dev: class device that is converted into a Scsi_host.
3048  * @attr: device attribute, not used.
3049  * @buf: buffer for passing information.
3050  *
3051  * Returns:
3052  * size of formatted string.
3053  **/
3054 static ssize_t
3055 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3056 		  char *buf)
3057 {
3058 	struct Scsi_Host *shost = class_to_shost(dev);
3059 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3060 
3061 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3062 			wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3063 }
3064 
3065 /**
3066  * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
3067  *		      for Optimized Access Storage (OAS) operations.
3068  * @dev: class device that is converted into a Scsi_host.
3069  * @attr: device attribute, not used.
3070  * @buf: buffer for passing information.
3071  * @count: Size of the data buffer.
3072  *
3073  * Returns:
3074  * -EINVAL count is invalid or buf contains an invalid wwpn
3075  * -EPERM oas is not supported by hba
3076  * value of count on success
3077  **/
3078 static ssize_t
3079 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3080 		   const char *buf, size_t count)
3081 {
3082 	struct Scsi_Host *shost = class_to_shost(dev);
3083 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3084 	unsigned int cnt = count;
3085 	uint8_t wwpn[WWN_SZ];
3086 	int rc;
3087 
3088 	if (!phba->cfg_fof)
3089 		return -EPERM;
3090 
3091 	/* count may include a LF at end of string */
3092 	if (buf[cnt-1] == '\n')
3093 		cnt--;
3094 
3095 	rc = lpfc_wwn_set(buf, cnt, wwpn);
3096 	if (rc)
3097 		return rc;
3098 
3099 	memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3100 	memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3101 	if (wwn_to_u64(wwpn) == 0)
3102 		phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3103 	else
3104 		phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3105 	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3106 	if (phba->cfg_oas_priority == 0)
3107 		phba->cfg_oas_priority = phba->cfg_XLanePriority;
3108 	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3109 	return count;
3110 }
3111 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3112 		   lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3113 
3114 /**
3115  * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
3116  *			    that will be applied to luns for Optimized Access
3117  *			    Storage (OAS) operations.
3118  * @dev: class device that is converted into a Scsi_host.
3119  * @attr: device attribute, not used.
3120  * @buf: buffer for passing information.
3121  *
3122  * Returns:
3123  * size of formatted string.
3124  **/
3125 static ssize_t
3126 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3127 			char *buf)
3128 {
3129 	struct Scsi_Host *shost = class_to_shost(dev);
3130 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3131 
3132 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3133 }
3134 
3135 /**
3136  * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
3137  *			    that will be applied to luns for Optimized Access
3138  *			    Storage (OAS) operations.
3139  * @dev: class device that is converted into a Scsi_host.
3140  * @attr: device attribute, not used.
3141  * @buf: buffer for passing information.
3142  * @count: Size of the data buffer.
3143  *
3144  * Returns:
3145  * -EINVAL buf does not contain a valid state (0 or 1)
3146  * -EPERM oas is not supported by hba
3147  * value of count on success
3148  **/
3149 static ssize_t
lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
3152 {
3153 	struct Scsi_Host *shost = class_to_shost(dev);
3154 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3155 	int val = 0;
3156 
3157 	if (!phba->cfg_fof)
3158 		return -EPERM;
3159 
3160 	if (!isdigit(buf[0]))
3161 		return -EINVAL;
3162 
3163 	if (sscanf(buf, "%i", &val) != 1)
3164 		return -EINVAL;
3165 
3166 	if ((val != 0) && (val != 1))
3167 		return -EINVAL;
3168 
3169 	phba->cfg_oas_lun_state = val;
3170 	return strlen(buf);
3171 }
3172 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3173 		   lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
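/*
 * Illustrative usage sketch (host number is a placeholder): the value
 * written here (0 or 1) only records whether a subsequent write to
 * lpfc_xlane_lun should disable or enable that lun for OAS:
 *
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_xlane_lun_state
 */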
3174 
3175 /**
3176  * lpfc_oas_lun_status_show - Return the status of the Optimized Access
3177  *                          Storage (OAS) lun returned by the
3178  *                          lpfc_oas_lun_show function.
3179  * @dev: class device that is converted into a Scsi_host.
3180  * @attr: device attribute, not used.
3181  * @buf: buffer for passing information.
3182  *
3183  * Returns:
3184  * size of formatted string.
3185  **/
3186 static ssize_t
lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
3189 {
3190 	struct Scsi_Host *shost = class_to_shost(dev);
3191 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3192 
3193 	if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3194 		return -EFAULT;
3195 
3196 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3197 }
3198 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3199 		   lpfc_oas_lun_status_show, NULL);
3200 
3201 
3202 /**
3203  * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
3204  *			   (OAS) operations.
3205  * @phba: lpfc_hba pointer.
3206  * @vpt_wwpn: wwpn of the vport associated with the returned lun
3207  * @tgt_wwpn: wwpn of the target associated with the returned lun
3208  * @lun: the fc lun for setting oas state.
3209  * @oas_state: the oas state to be set to the lun.
3210  * @pri: priority
3211  *
3212  * Returns:
3213  * SUCCESS : 0
3214  * -EPERM OAS is not enabled or not supported by this port.
3215  *
3216  */
3217 static size_t
lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
		       uint8_t tgt_wwpn[], uint64_t lun,
		       uint32_t oas_state, uint8_t pri)
3221 {
3222 
3223 	int rc = 0;
3224 
3225 	if (!phba->cfg_fof)
3226 		return -EPERM;
3227 
3228 	if (oas_state) {
3229 		if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3230 					 (struct lpfc_name *)tgt_wwpn,
3231 					 lun, pri))
3232 			rc = -ENOMEM;
3233 	} else {
3234 		lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3235 				     (struct lpfc_name *)tgt_wwpn, lun, pri);
3236 	}
3237 	return rc;
3238 
3239 }
3240 
3241 /**
3242  * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
3243  *			  Access Storage (OAS) operations.
3244  * @phba: lpfc_hba pointer.
3245  * @vpt_wwpn: wwpn of the vport associated with the returned lun
3246  * @tgt_wwpn: wwpn of the target associated with the returned lun
 * @lun_status: status of the returned lun
 * @lun_pri: priority of the returned lun
 *
 * Returns the first or next lun enabled for OAS operations for the vport/target
 * specified.  If a lun is found, its vport wwpn, target wwpn and status are
 * returned.  If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
3253  *
3254  * Return:
3255  * lun that is OAS enabled for the vport/target
3256  * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
3257  */
3258 static uint64_t
lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
		      uint8_t tgt_wwpn[], uint32_t *lun_status,
		      uint32_t *lun_pri)
3262 {
3263 	uint64_t found_lun;
3264 
3265 	if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3266 		return NOT_OAS_ENABLED_LUN;
3267 	if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3268 				   phba->sli4_hba.oas_next_vpt_wwpn,
3269 				   (struct lpfc_name *)
3270 				   phba->sli4_hba.oas_next_tgt_wwpn,
3271 				   &phba->sli4_hba.oas_next_lun,
3272 				   (struct lpfc_name *)vpt_wwpn,
3273 				   (struct lpfc_name *)tgt_wwpn,
3274 				   &found_lun, lun_status, lun_pri))
3275 		return found_lun;
3276 	else
3277 		return NOT_OAS_ENABLED_LUN;
3278 }
3279 
3280 /**
3281  * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
3282  * @phba: lpfc_hba pointer.
3283  * @vpt_wwpn: vport wwpn by reference.
3284  * @tgt_wwpn: target wwpn by reference.
3285  * @lun: the fc lun for setting oas state.
3286  * @oas_state: the oas state to be set to the oas_lun.
3287  * @pri: priority
3288  *
3289  * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
3290  * a lun for OAS operations.
3291  *
3292  * Return:
3293  * SUCCESS: 0
 * -ENOMEM: failed to enable a lun for OAS operations
3295  * -EPERM: OAS is not enabled
3296  */
3297 static ssize_t
lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
			  uint8_t tgt_wwpn[], uint64_t lun,
			  uint32_t oas_state, uint8_t pri)
3301 {
3302 
3303 	int rc;
3304 
3305 	rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3306 				    oas_state, pri);
3307 	return rc;
3308 }
3309 
3310 /**
3311  * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
3312  * @dev: class device that is converted into a Scsi_host.
3313  * @attr: device attribute, not used.
3314  * @buf: buffer for passing information.
3315  *
3316  * This routine returns a lun enabled for OAS each time the function
3317  * is called.
3318  *
3319  * Returns:
3320  * SUCCESS: size of formatted string.
3321  * -EFAULT: target or vport wwpn was not set properly.
3322  * -EPERM: oas is not enabled.
3323  **/
3324 static ssize_t
lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
3327 {
3328 	struct Scsi_Host *shost = class_to_shost(dev);
3329 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3330 
3331 	uint64_t oas_lun;
3332 	int len = 0;
3333 
3334 	if (!phba->cfg_fof)
3335 		return -EPERM;
3336 
3337 	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3338 		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3339 			return -EFAULT;
3340 
3341 	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3342 		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3343 			return -EFAULT;
3344 
3345 	oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3346 					phba->cfg_oas_tgt_wwpn,
3347 					&phba->cfg_oas_lun_status,
3348 					&phba->cfg_oas_priority);
3349 	if (oas_lun != NOT_OAS_ENABLED_LUN)
3350 		phba->cfg_oas_flags |= OAS_LUN_VALID;
3351 
3352 	len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3353 
3354 	return len;
3355 }
3356 
3357 /**
3358  * lpfc_oas_lun_store - Sets the OAS state for lun
3359  * @dev: class device that is converted into a Scsi_host.
3360  * @attr: device attribute, not used.
3361  * @buf: buffer for passing information.
 * @count: size of the data buffer.
 *
 * This function sets the OAS state for the lun.  Before this function is
 * called, the vport wwpn, target wwpn, and oas state need to be set.
 *
 * Returns:
 * SUCCESS: size of formatted string.
 * -EFAULT: target or vport wwpn was not set properly.
 * -EPERM: oas is not enabled.
3372  **/
3373 static ssize_t
lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
3376 {
3377 	struct Scsi_Host *shost = class_to_shost(dev);
3378 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3379 	uint64_t scsi_lun;
3380 	uint32_t pri;
3381 	ssize_t rc;
3382 
3383 	if (!phba->cfg_fof)
3384 		return -EPERM;
3385 
3386 	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3387 		return -EFAULT;
3388 
3389 	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3390 		return -EFAULT;
3391 
3392 	if (!isdigit(buf[0]))
3393 		return -EINVAL;
3394 
3395 	if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3396 		return -EINVAL;
3397 
3398 	pri = phba->cfg_oas_priority;
3399 	if (pri == 0)
3400 		pri = phba->cfg_XLanePriority;
3401 
3402 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3403 			"3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3404 			"priority 0x%x with oas state %d\n",
3405 			wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3406 			wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3407 			pri, phba->cfg_oas_lun_state);
3408 
3409 	rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3410 				       phba->cfg_oas_tgt_wwpn, scsi_lun,
3411 				       phba->cfg_oas_lun_state, pri);
3412 	if (rc)
3413 		return rc;
3414 
3415 	return count;
3416 }
3417 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3418 		   lpfc_oas_lun_show, lpfc_oas_lun_store);
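/*
 * Sketch of the overall OAS sysfs flow, inferred from the handlers above
 * (host number, WWPNs and lun are placeholders; lpfc_xlane_tgt is the
 * companion target-WWPN attribute defined with the other OAS attributes):
 *
 *   echo <vport_wwpn> > /sys/class/scsi_host/hostN/lpfc_xlane_vpt
 *   echo <target_wwpn> > /sys/class/scsi_host/hostN/lpfc_xlane_tgt
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_xlane_lun_state
 *   echo 0x0 > /sys/class/scsi_host/hostN/lpfc_xlane_lun
 *
 * Reading lpfc_xlane_lun afterwards walks the luns currently enabled for
 * OAS on the selected vport/target, and lpfc_xlane_lun_status reports the
 * status of the lun returned by that read.
 */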
3419 
3420 int lpfc_enable_nvmet_cnt;
3421 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3422 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3423 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3424 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3425 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
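/*
 * Illustrative module load sketch (WWPN values are placeholders): each
 * array entry names a local HBA port, by WWPN, to bring up as an NVME
 * target:
 *
 *   modprobe lpfc lpfc_enable_nvmet=<wwpn>[,<wwpn>...]
 */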
3426 
3427 static int lpfc_poll = 0;
3428 module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
		 " 0 - none,"
		 " 1 - poll with interrupts enabled,"
		 " 3 - poll and disable FCP ring interrupts");
3433 
3434 static DEVICE_ATTR_RW(lpfc_poll);
3435 
3436 int lpfc_no_hba_reset_cnt;
3437 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3438 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3439 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3440 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
3441 
3442 LPFC_ATTR(sli_mode, 3, 3, 3,
3443 	"SLI mode selector: 3 - select SLI-3");
3444 
3445 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3446 	"Enable NPIV functionality");
3447 
3448 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3449 	"FCF Fast failover=1 Priority failover=2");
3450 
3451 /*
3452 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
3453 #	0x0 = disabled, XRI/OXID use not tracked.
3454 #	0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
3455 #	0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
3456 */
3457 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3458 	"Enable RRQ functionality");
3459 
3460 /*
3461 # lpfc_suppress_link_up:  Bring link up at initialization
3462 #            0x0  = bring link up (issue MBX_INIT_LINK)
3463 #            0x1  = do NOT bring link up at initialization(MBX_INIT_LINK)
3464 #            0x2  = never bring up link
3465 # Default value is 0.
3466 */
3467 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3468 		LPFC_DELAY_INIT_LINK_INDEFINITELY,
3469 		"Suppress Link Up at initialization");
3470 
3471 static ssize_t
lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3473 {
3474 	struct Scsi_Host  *shost = class_to_shost(dev);
3475 	struct lpfc_hba   *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3476 
3477 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3478 			 phba->sli4_hba.pc_sli4_params.pls);
3479 }
3480 static DEVICE_ATTR(pls, 0444,
3481 			 lpfc_pls_show, NULL);
3482 
3483 static ssize_t
lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3485 {
3486 	struct Scsi_Host  *shost = class_to_shost(dev);
3487 	struct lpfc_hba   *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3488 
3489 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3490 			 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3491 }
3492 static DEVICE_ATTR(pt, 0444,
3493 			 lpfc_pt_show, NULL);
3494 
3495 /*
3496 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
3497 #       1 - (1024)
3498 #       2 - (2048)
3499 #       3 - (3072)
3500 #       4 - (4096)
3501 #       5 - (5120)
3502 */
3503 static ssize_t
lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3505 {
3506 	struct Scsi_Host  *shost = class_to_shost(dev);
3507 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3508 
3509 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3510 }
3511 
3512 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3513 			 lpfc_iocb_hw_show, NULL);
3514 static ssize_t
lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3516 {
3517 	struct Scsi_Host  *shost = class_to_shost(dev);
3518 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3519 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3520 
3521 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3522 			pring ? pring->txq_max : 0);
3523 }
3524 
3525 static DEVICE_ATTR(txq_hw, S_IRUGO,
3526 			 lpfc_txq_hw_show, NULL);
3527 static ssize_t
lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
3530 {
3531 	struct Scsi_Host  *shost = class_to_shost(dev);
3532 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3533 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3534 
3535 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3536 			pring ? pring->txcmplq_max : 0);
3537 }
3538 
3539 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3540 			 lpfc_txcmplq_hw_show, NULL);
3541 
3542 /*
3543 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
3544 # until the timer expires. Value range is [0,255]. Default value is 30.
3545 */
3546 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3547 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3548 module_param(lpfc_nodev_tmo, int, 0);
3549 MODULE_PARM_DESC(lpfc_nodev_tmo,
3550 		 "Seconds driver will hold I/O waiting "
3551 		 "for a device to come back");
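/*
 * Illustrative usage sketch (host number and value are placeholders):
 * the timeout can be set at load time or changed per host at runtime,
 * but runtime changes are ignored if lpfc_devloss_tmo has been set:
 *
 *   modprobe lpfc lpfc_nodev_tmo=60
 *   echo 60 > /sys/class/scsi_host/hostN/lpfc_nodev_tmo
 */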
3552 
3553 /**
3554  * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
3555  * @dev: class converted to a Scsi_host structure.
3556  * @attr: device attribute, not used.
3557  * @buf: on return contains the dev loss timeout in decimal.
3558  *
3559  * Returns: size of formatted string.
3560  **/
3561 static ssize_t
lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
3564 {
3565 	struct Scsi_Host  *shost = class_to_shost(dev);
3566 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3567 
3568 	return scnprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
3569 }
3570 
3571 /**
3572  * lpfc_nodev_tmo_init - Set the hba nodev timeout value
3573  * @vport: lpfc vport structure pointer.
3574  * @val: contains the nodev timeout value.
3575  *
3576  * Description:
3577  * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
3578  * a kernel error message is printed and zero is returned.
3579  * Else if val is in range then nodev tmo and devloss tmo are set to val.
3580  * Otherwise nodev tmo is set to the default value.
3581  *
3582  * Returns:
3583  * zero if already set or if val is in range
3584  * -EINVAL val out of range
3585  **/
3586 static int
lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3588 {
3589 	if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3590 		vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3591 		if (val != LPFC_DEF_DEVLOSS_TMO)
3592 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3593 					 "0407 Ignoring lpfc_nodev_tmo module "
3594 					 "parameter because lpfc_devloss_tmo "
3595 					 "is set.\n");
3596 		return 0;
3597 	}
3598 
3599 	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3600 		vport->cfg_nodev_tmo = val;
3601 		vport->cfg_devloss_tmo = val;
3602 		return 0;
3603 	}
3604 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3605 			 "0400 lpfc_nodev_tmo attribute cannot be set to"
3606 			 " %d, allowed range is [%d, %d]\n",
3607 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3608 	vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3609 	return -EINVAL;
3610 }
3611 
3612 /**
3613  * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
3614  * @vport: lpfc vport structure pointer.
3615  *
3616  * Description:
3617  * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
3618  **/
3619 static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3621 {
3622 	struct Scsi_Host  *shost;
3623 	struct lpfc_nodelist  *ndlp;
3624 #if (IS_ENABLED(CONFIG_NVME_FC))
3625 	struct lpfc_nvme_rport *rport;
3626 	struct nvme_fc_remote_port *remoteport = NULL;
3627 #endif
3628 
3629 	shost = lpfc_shost_from_vport(vport);
3630 	spin_lock_irq(shost->host_lock);
3631 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3632 		if (ndlp->rport)
3633 			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3634 #if (IS_ENABLED(CONFIG_NVME_FC))
3635 		spin_lock(&ndlp->lock);
3636 		rport = lpfc_ndlp_get_nrport(ndlp);
3637 		if (rport)
3638 			remoteport = rport->remoteport;
3639 		spin_unlock(&ndlp->lock);
3640 		if (rport && remoteport)
3641 			nvme_fc_set_remoteport_devloss(remoteport,
3642 						       vport->cfg_devloss_tmo);
3643 #endif
3644 	}
3645 	spin_unlock_irq(shost->host_lock);
3646 }
3647 
3648 /**
3649  * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
3650  * @vport: lpfc vport structure pointer.
3651  * @val: contains the tmo value.
3652  *
3653  * Description:
3654  * If the devloss tmo is already set or the vport dev loss tmo has changed
3655  * then a kernel error message is printed and zero is returned.
3656  * Else if val is in range then nodev tmo and devloss tmo are set to val.
3657  * Otherwise nodev tmo is set to the default value.
3658  *
3659  * Returns:
3660  * zero if already set or if val is in range
3661  * -EINVAL val out of range
3662  **/
3663 static int
lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3665 {
3666 	if (vport->dev_loss_tmo_changed ||
3667 	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3668 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3669 				 "0401 Ignoring change to lpfc_nodev_tmo "
3670 				 "because lpfc_devloss_tmo is set.\n");
3671 		return 0;
3672 	}
3673 	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3674 		vport->cfg_nodev_tmo = val;
3675 		vport->cfg_devloss_tmo = val;
3676 		/*
3677 		 * For compat: set the fc_host dev loss so new rports
3678 		 * will get the value.
3679 		 */
3680 		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3681 		lpfc_update_rport_devloss_tmo(vport);
3682 		return 0;
3683 	}
3684 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3685 			 "0403 lpfc_nodev_tmo attribute cannot be set to "
3686 			 "%d, allowed range is [%d, %d]\n",
3687 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3688 	return -EINVAL;
3689 }
3690 
3691 lpfc_vport_param_store(nodev_tmo)
3692 
3693 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3694 
3695 /*
3696 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
3697 # disappear until the timer expires. Value range is [0,255]. Default
3698 # value is 30.
3699 */
3700 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3701 MODULE_PARM_DESC(lpfc_devloss_tmo,
3702 		 "Seconds driver will hold I/O waiting "
3703 		 "for a device to come back");
lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
		      LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3706 lpfc_vport_param_show(devloss_tmo)
3707 
3708 /**
3709  * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
3710  * @vport: lpfc vport structure pointer.
3711  * @val: contains the tmo value.
3712  *
3713  * Description:
3714  * If val is in a valid range then set the vport nodev tmo,
3715  * devloss tmo, also set the vport dev loss tmo changed flag.
3716  * Else a kernel error message is printed.
3717  *
3718  * Returns:
3719  * zero if val is in range
3720  * -EINVAL val out of range
3721  **/
3722 static int
3723 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3724 {
3725 	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3726 		vport->cfg_nodev_tmo = val;
3727 		vport->cfg_devloss_tmo = val;
3728 		vport->dev_loss_tmo_changed = 1;
3729 		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3730 		lpfc_update_rport_devloss_tmo(vport);
3731 		return 0;
3732 	}
3733 
3734 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3735 			 "0404 lpfc_devloss_tmo attribute cannot be set to "
3736 			 "%d, allowed range is [%d, %d]\n",
3737 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3738 	return -EINVAL;
3739 }
3740 
3741 lpfc_vport_param_store(devloss_tmo)
3742 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
3743 
/*
 * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
 * lpfc_suppress_rsp = 0  Disable
 * lpfc_suppress_rsp = 1  Enable (default)
 *
 */
LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
	    "Enable suppress rsp feature if firmware supports it");
3752 
3753 /*
3754  * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
 * lpfc_nvmet_mrq = 0  driver will calculate optimal number of RQ pairs
3756  * lpfc_nvmet_mrq = 1  use a single RQ pair
3757  * lpfc_nvmet_mrq >= 2  use specified RQ pairs for MRQ
3758  *
3759  */
3760 LPFC_ATTR_R(nvmet_mrq,
3761 	    LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3762 	    "Specify number of RQ pairs for processing NVMET cmds");
3763 
3764 /*
3765  * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3766  * to each NVMET RQ. Range 64 to 2048, default is 512.
3767  */
3768 LPFC_ATTR_R(nvmet_mrq_post,
3769 	    LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3770 	    LPFC_NVMET_RQE_DEF_COUNT,
3771 	    "Specify number of RQ buffers to initially post");
3772 
3773 /*
3774  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3775  * Supported Values:  1 - register just FCP
3776  *                    3 - register both FCP and NVME
3777  * Supported values are [1,3]. Default value is 3
3778  */
3779 LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
3780 	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3781 	    "Enable FC4 Protocol support - FCP / NVME");
3782 
3783 /*
3784 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3785 # deluged with LOTS of information.
3786 # You can set a bit mask to record specific types of verbose messages:
# See lpfc_logmsg.h for definitions.
3788 */
3789 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3790 		       "Verbose logging bit-mask");
3791 
3792 /*
3793 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3794 # objects that have been registered with the nameserver after login.
3795 */
3796 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3797 		  "Deregister nameserver objects before LOGO");
3798 
3799 /*
3800 # lun_queue_depth:  This parameter is used to limit the number of outstanding
3801 # commands per FCP LUN.
3802 */
3803 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3804 		  "Max number of FCP commands we can queue to a specific LUN");
3805 
3806 /*
3807 # tgt_queue_depth:  This parameter is used to limit the number of outstanding
3808 # commands per target port. Value range is [10,65535]. Default value is 65535.
3809 */
3810 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3811 module_param(lpfc_tgt_queue_depth, uint, 0444);
3812 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3813 lpfc_vport_param_show(tgt_queue_depth);
3814 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3815 		      LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3816 
3817 /**
3818  * lpfc_tgt_queue_depth_set: Sets an attribute value.
3819  * @vport: lpfc vport structure pointer.
3820  * @val: integer attribute value.
3821  *
3822  * Description: Sets the parameter to the new value.
3823  *
3824  * Returns:
3825  * zero on success
3826  * -EINVAL if val is invalid
3827  */
3828 static int
lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3830 {
3831 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3832 	struct lpfc_nodelist *ndlp;
3833 
3834 	if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
3835 		return -EINVAL;
3836 
3837 	if (val == vport->cfg_tgt_queue_depth)
3838 		return 0;
3839 
3840 	spin_lock_irq(shost->host_lock);
3841 	vport->cfg_tgt_queue_depth = val;
3842 
3843 	/* Next loop thru nodelist and change cmd_qdepth */
3844 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3845 		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3846 
3847 	spin_unlock_irq(shost->host_lock);
3848 	return 0;
3849 }
3850 
3851 lpfc_vport_param_store(tgt_queue_depth);
3852 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
3853 
3854 /*
3855 # hba_queue_depth:  This parameter is used to limit the number of outstanding
3856 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
3857 # value is greater than the maximum number of exchanges supported by the HBA,
3858 # then maximum number of exchanges supported by the HBA is used to determine
3859 # the hba_queue_depth.
3860 */
3861 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
3862 	    "Max number of FCP commands we can queue to a lpfc HBA");
3863 
3864 /*
3865 # peer_port_login:  This parameter allows/prevents logins
3866 # between peer ports hosted on the same physical port.
3867 # When this parameter is set 0 peer ports of same physical port
3868 # are not allowed to login to each other.
3869 # When this parameter is set 1 peer ports of same physical port
3870 # are allowed to login to each other.
3871 # Default value of this parameter is 0.
3872 */
3873 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
3874 		  "Allow peer ports on the same physical port to login to each "
3875 		  "other.");
3876 
3877 /*
3878 # restrict_login:  This parameter allows/prevents logins
3879 # between Virtual Ports and remote initiators.
3880 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from
3881 # other initiators and will attempt to PLOGI all remote ports.
3882 # When this parameter is set (1) Virtual Ports will reject PLOGIs from
3883 # remote ports and will not attempt to PLOGI to other initiators.
3884 # This parameter does not restrict to the physical port.
3885 # This parameter does not restrict logins to Fabric resident remote ports.
3886 # Default value of this parameter is 1.
3887 */
3888 static int lpfc_restrict_login = 1;
3889 module_param(lpfc_restrict_login, int, S_IRUGO);
3890 MODULE_PARM_DESC(lpfc_restrict_login,
3891 		 "Restrict virtual ports login to remote initiators.");
3892 lpfc_vport_param_show(restrict_login);
3893 
3894 /**
3895  * lpfc_restrict_login_init - Set the vport restrict login flag
3896  * @vport: lpfc vport structure pointer.
3897  * @val: contains the restrict login value.
3898  *
3899  * Description:
3900  * If val is not in a valid range then log a kernel error message and set
3901  * the vport restrict login to one.
3902  * If the port type is physical clear the restrict login flag and return.
3903  * Else set the restrict login flag to val.
3904  *
3905  * Returns:
3906  * zero if val is in range
3907  * -EINVAL val out of range
3908  **/
3909 static int
lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
3911 {
3912 	if (val < 0 || val > 1) {
3913 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3914 				 "0422 lpfc_restrict_login attribute cannot "
3915 				 "be set to %d, allowed range is [0, 1]\n",
3916 				 val);
3917 		vport->cfg_restrict_login = 1;
3918 		return -EINVAL;
3919 	}
3920 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
3921 		vport->cfg_restrict_login = 0;
3922 		return 0;
3923 	}
3924 	vport->cfg_restrict_login = val;
3925 	return 0;
3926 }
3927 
3928 /**
3929  * lpfc_restrict_login_set - Set the vport restrict login flag
3930  * @vport: lpfc vport structure pointer.
3931  * @val: contains the restrict login value.
3932  *
3933  * Description:
3934  * If val is not in a valid range then log a kernel error message and set
3935  * the vport restrict login to one.
3936  * If the port type is physical and the val is not zero log a kernel
3937  * error message, clear the restrict login flag and return zero.
3938  * Else set the restrict login flag to val.
3939  *
3940  * Returns:
3941  * zero if val is in range
3942  * -EINVAL val out of range
3943  **/
3944 static int
lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
3946 {
3947 	if (val < 0 || val > 1) {
3948 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3949 				 "0425 lpfc_restrict_login attribute cannot "
3950 				 "be set to %d, allowed range is [0, 1]\n",
3951 				 val);
3952 		vport->cfg_restrict_login = 1;
3953 		return -EINVAL;
3954 	}
3955 	if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
3956 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3957 				 "0468 lpfc_restrict_login must be 0 for "
3958 				 "Physical ports.\n");
3959 		vport->cfg_restrict_login = 0;
3960 		return 0;
3961 	}
3962 	vport->cfg_restrict_login = val;
3963 	return 0;
3964 }
3965 lpfc_vport_param_store(restrict_login);
3966 static DEVICE_ATTR_RW(lpfc_restrict_login);
3967 
3968 /*
3969 # Some disk devices have a "select ID" or "select Target" capability.
3970 # From a protocol standpoint "select ID" usually means select the
3971 # Fibre channel "ALPA".  In the FC-AL Profile there is an "informative
3972 # annex" which contains a table that maps a "select ID" (a number
3973 # between 0 and 7F) to an ALPA.  By default, for compatibility with
3974 # older drivers, the lpfc driver scans this table from low ALPA to high
3975 # ALPA.
3976 #
3977 # Turning on the scan-down variable (on  = 1, off = 0) will
3978 # cause the lpfc driver to use an inverted table, effectively
3979 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
3980 #
3981 # (Note: This "select ID" functionality is a LOOP ONLY characteristic
3982 # and will not work across a fabric. Also this parameter will take
3983 # effect only in the case when ALPA map is not available.)
3984 */
3985 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
3986 		  "Start scanning for devices from highest ALPA to lowest");
3987 
3988 /*
3989 # lpfc_topology:  link topology for init link
3990 #            0x0  = attempt loop mode then point-to-point
3991 #            0x01 = internal loopback mode
3992 #            0x02 = attempt point-to-point mode only
3993 #            0x04 = attempt loop mode only
3994 #            0x06 = attempt point-to-point mode then loop
3995 # Set point-to-point mode if you want to run as an N_Port.
3996 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
3997 # Default value is 0.
3998 */
3999 LPFC_ATTR(topology, 0, 0, 6,
4000 	"Select Fibre Channel topology");
4001 
4002 /**
4003  * lpfc_topology_store - Set the adapters topology field
4004  * @dev: class device that is converted into a scsi_host.
 * @attr: device attribute, not used.
4006  * @buf: buffer for passing information.
4007  * @count: size of the data buffer.
4008  *
4009  * Description:
4010  * If val is in a valid range then set the adapter's topology field and
4011  * issue a lip; if the lip fails reset the topology to the old value.
4012  *
4013  * If the value is not in range log a kernel error message and return an error.
4014  *
4015  * Returns:
4016  * zero if val is in range and lip okay
4017  * non-zero return value from lpfc_issue_lip()
4018  * -EINVAL val out of range
4019  **/
4020 static ssize_t
lpfc_topology_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
4023 {
4024 	struct Scsi_Host  *shost = class_to_shost(dev);
4025 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4026 	struct lpfc_hba   *phba = vport->phba;
4027 	int val = 0;
4028 	int nolip = 0;
4029 	const char *val_buf = buf;
4030 	int err;
4031 	uint32_t prev_val;
4032 
4033 	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4034 		nolip = 1;
4035 		val_buf = &buf[strlen("nolip ")];
4036 	}
4037 
4038 	if (!isdigit(val_buf[0]))
4039 		return -EINVAL;
4040 	if (sscanf(val_buf, "%i", &val) != 1)
4041 		return -EINVAL;
4042 
4043 	if (val >= 0 && val <= 6) {
4044 		prev_val = phba->cfg_topology;
4045 		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4046 			val == 4) {
4047 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4048 				"3113 Loop mode not supported at speed %d\n",
4049 				val);
4050 			return -EINVAL;
4051 		}
4052 		/*
4053 		 * The 'topology' is not a configurable parameter if :
4054 		 *   - persistent topology enabled
4055 		 *   - G7/G6 with no private loop support
4056 		 */
4057 
4058 		if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4059 		     (!phba->sli4_hba.pc_sli4_params.pls &&
4060 		     (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
4061 		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
4062 		    val == 4) {
4063 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4064 				"3114 Loop mode not supported\n");
4065 			return -EINVAL;
4066 		}
4067 		phba->cfg_topology = val;
4068 		if (nolip)
4069 			return strlen(buf);
4070 
4071 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4072 			"3054 lpfc_topology changed from %d to %d\n",
4073 			prev_val, val);
4074 		if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4075 			phba->fc_topology_changed = 1;
4076 		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4077 		if (err) {
4078 			phba->cfg_topology = prev_val;
4079 			return -EINVAL;
4080 		} else
4081 			return strlen(buf);
4082 	}
4083 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4084 		"%d:0467 lpfc_topology attribute cannot be set to %d, "
4085 		"allowed range is [0, 6]\n",
4086 		phba->brd_no, val);
4087 	return -EINVAL;
4088 }
4089 
4090 lpfc_param_show(topology)
4091 static DEVICE_ATTR_RW(lpfc_topology);
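/*
 * Illustrative usage sketch (host number is a placeholder): writing a
 * plain value changes the topology and issues a LIP, while the "nolip "
 * prefix handled in lpfc_topology_store() only records the new value
 * without issuing a LIP:
 *
 *   echo 2 > /sys/class/scsi_host/hostN/lpfc_topology
 *   echo "nolip 6" > /sys/class/scsi_host/hostN/lpfc_topology
 */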
4092 
4093 /**
4094  * lpfc_static_vport_show: Read callback function for
4095  *   lpfc_static_vport sysfs file.
4096  * @dev: Pointer to class device object.
4097  * @attr: device attribute structure.
4098  * @buf: Data buffer.
4099  *
 * This function is the read callback function for the
 * lpfc_static_vport sysfs file. The lpfc_static_vport
 * sysfs file reports the manageability of the vport.
4103  **/
4104 static ssize_t
lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
4107 {
4108 	struct Scsi_Host  *shost = class_to_shost(dev);
4109 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4110 	if (vport->vport_flag & STATIC_VPORT)
4111 		sprintf(buf, "1\n");
4112 	else
4113 		sprintf(buf, "0\n");
4114 
4115 	return strlen(buf);
4116 }
4117 
/*
 * Sysfs attribute to report whether the vport is a static vport.
 */
4121 static DEVICE_ATTR_RO(lpfc_static_vport);
4122 
4123 /**
4124  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
4125  * @dev: Pointer to class device.
4126  * @attr: Unused.
4127  * @buf: Data buffer.
4128  * @count: Size of the data buffer.
4129  *
 * This function gets called when a user writes to the lpfc_stat_data_ctrl
 * sysfs file. This function parses the command written to the sysfs file
 * and takes the appropriate action. These commands are used for controlling
 * driver statistical data collection.
 * Following are the commands this function handles.
4135  *
4136  *    setbucket <bucket_type> <base> <step>
4137  *			       = Set the latency buckets.
4138  *    destroybucket            = destroy all the buckets.
4139  *    start                    = start data collection
4140  *    stop                     = stop data collection
4141  *    reset                    = reset the collected data
4142  **/
4143 static ssize_t
lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
4146 {
4147 	struct Scsi_Host  *shost = class_to_shost(dev);
4148 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4149 	struct lpfc_hba   *phba = vport->phba;
4150 #define LPFC_MAX_DATA_CTRL_LEN 1024
4151 	static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4152 	unsigned long i;
4153 	char *str_ptr, *token;
4154 	struct lpfc_vport **vports;
4155 	struct Scsi_Host *v_shost;
4156 	char *bucket_type_str, *base_str, *step_str;
4157 	unsigned long base, step, bucket_type;
4158 
4159 	if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4160 		if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4161 			return -EINVAL;
4162 
4163 		strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4164 		str_ptr = &bucket_data[0];
4165 		/* Ignore this token - this is command token */
4166 		token = strsep(&str_ptr, "\t ");
4167 		if (!token)
4168 			return -EINVAL;
4169 
4170 		bucket_type_str = strsep(&str_ptr, "\t ");
4171 		if (!bucket_type_str)
4172 			return -EINVAL;
4173 
4174 		if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4175 			bucket_type = LPFC_LINEAR_BUCKET;
4176 		else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4177 			bucket_type = LPFC_POWER2_BUCKET;
4178 		else
4179 			return -EINVAL;
4180 
4181 		base_str = strsep(&str_ptr, "\t ");
4182 		if (!base_str)
4183 			return -EINVAL;
4184 		base = simple_strtoul(base_str, NULL, 0);
4185 
4186 		step_str = strsep(&str_ptr, "\t ");
4187 		if (!step_str)
4188 			return -EINVAL;
4189 		step = simple_strtoul(step_str, NULL, 0);
4190 		if (!step)
4191 			return -EINVAL;
4192 
4193 		/* Block the data collection for every vport */
4194 		vports = lpfc_create_vport_work_array(phba);
4195 		if (vports == NULL)
4196 			return -ENOMEM;
4197 
4198 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4199 			v_shost = lpfc_shost_from_vport(vports[i]);
4200 			spin_lock_irq(v_shost->host_lock);
4201 			/* Block and reset data collection */
4202 			vports[i]->stat_data_blocked = 1;
4203 			if (vports[i]->stat_data_enabled)
4204 				lpfc_vport_reset_stat_data(vports[i]);
4205 			spin_unlock_irq(v_shost->host_lock);
4206 		}
4207 
4208 		/* Set the bucket attributes */
4209 		phba->bucket_type = bucket_type;
4210 		phba->bucket_base = base;
4211 		phba->bucket_step = step;
4212 
4213 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4214 			v_shost = lpfc_shost_from_vport(vports[i]);
4215 
4216 			/* Unblock data collection */
4217 			spin_lock_irq(v_shost->host_lock);
4218 			vports[i]->stat_data_blocked = 0;
4219 			spin_unlock_irq(v_shost->host_lock);
4220 		}
4221 		lpfc_destroy_vport_work_array(phba, vports);
4222 		return strlen(buf);
4223 	}
4224 
4225 	if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4226 		vports = lpfc_create_vport_work_array(phba);
4227 		if (vports == NULL)
4228 			return -ENOMEM;
4229 
4230 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4231 			v_shost = lpfc_shost_from_vport(vports[i]);
4232 			spin_lock_irq(shost->host_lock);
4233 			vports[i]->stat_data_blocked = 1;
4234 			lpfc_free_bucket(vport);
4235 			vport->stat_data_enabled = 0;
4236 			vports[i]->stat_data_blocked = 0;
4237 			spin_unlock_irq(shost->host_lock);
4238 		}
4239 		lpfc_destroy_vport_work_array(phba, vports);
4240 		phba->bucket_type = LPFC_NO_BUCKET;
4241 		phba->bucket_base = 0;
4242 		phba->bucket_step = 0;
4243 		return strlen(buf);
4244 	}
4245 
4246 	if (!strncmp(buf, "start", strlen("start"))) {
4247 		/* If no buckets configured return error */
4248 		if (phba->bucket_type == LPFC_NO_BUCKET)
4249 			return -EINVAL;
4250 		spin_lock_irq(shost->host_lock);
4251 		if (vport->stat_data_enabled) {
4252 			spin_unlock_irq(shost->host_lock);
4253 			return strlen(buf);
4254 		}
4255 		lpfc_alloc_bucket(vport);
4256 		vport->stat_data_enabled = 1;
4257 		spin_unlock_irq(shost->host_lock);
4258 		return strlen(buf);
4259 	}
4260 
4261 	if (!strncmp(buf, "stop", strlen("stop"))) {
4262 		spin_lock_irq(shost->host_lock);
4263 		if (vport->stat_data_enabled == 0) {
4264 			spin_unlock_irq(shost->host_lock);
4265 			return strlen(buf);
4266 		}
4267 		lpfc_free_bucket(vport);
4268 		vport->stat_data_enabled = 0;
4269 		spin_unlock_irq(shost->host_lock);
4270 		return strlen(buf);
4271 	}
4272 
4273 	if (!strncmp(buf, "reset", strlen("reset"))) {
4274 		if ((phba->bucket_type == LPFC_NO_BUCKET)
4275 			|| !vport->stat_data_enabled)
4276 			return strlen(buf);
4277 		spin_lock_irq(shost->host_lock);
4278 		vport->stat_data_blocked = 1;
4279 		lpfc_vport_reset_stat_data(vport);
4280 		vport->stat_data_blocked = 0;
4281 		spin_unlock_irq(shost->host_lock);
4282 		return strlen(buf);
4283 	}
4284 	return -EINVAL;
4285 }
4286 
4287 
4288 /**
4289  * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
4290  * @dev: Pointer to class device.
4291  * @attr: Unused.
4292  * @buf: Data buffer.
4293  *
 * This function is the read callback function for the
 * lpfc_stat_data_ctrl sysfs file. This function reports the
 * current statistical data collection state.
4297  **/
4298 static ssize_t
lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
4301 {
4302 	struct Scsi_Host  *shost = class_to_shost(dev);
4303 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4304 	struct lpfc_hba   *phba = vport->phba;
4305 	int index = 0;
4306 	int i;
4307 	char *bucket_type;
4308 	unsigned long bucket_value;
4309 
4310 	switch (phba->bucket_type) {
4311 	case LPFC_LINEAR_BUCKET:
4312 		bucket_type = "linear";
4313 		break;
4314 	case LPFC_POWER2_BUCKET:
4315 		bucket_type = "power2";
4316 		break;
4317 	default:
4318 		bucket_type = "No Bucket";
4319 		break;
4320 	}
4321 
4322 	sprintf(&buf[index], "Statistical Data enabled :%d, "
4323 		"blocked :%d, Bucket type :%s, Bucket base :%d,"
4324 		" Bucket step :%d\nLatency Ranges :",
4325 		vport->stat_data_enabled, vport->stat_data_blocked,
4326 		bucket_type, phba->bucket_base, phba->bucket_step);
4327 	index = strlen(buf);
4328 	if (phba->bucket_type != LPFC_NO_BUCKET) {
4329 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4330 			if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4331 				bucket_value = phba->bucket_base +
4332 					phba->bucket_step * i;
4333 			else
4334 				bucket_value = phba->bucket_base +
4335 				(1 << i) * phba->bucket_step;
4336 
4337 			if (index + 10 > PAGE_SIZE)
4338 				break;
4339 			sprintf(&buf[index], "%08ld ", bucket_value);
4340 			index = strlen(buf);
4341 		}
4342 	}
4343 	sprintf(&buf[index], "\n");
4344 	return strlen(buf);
4345 }
4346 
4347 /*
4348  * Sysfs attribute to control the statistical data collection.
4349  */
4350 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
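/*
 * Illustrative control sequence (host number, base and step are
 * placeholders), matching the commands parsed in
 * lpfc_stat_data_ctrl_store() above:
 *
 *   echo "setbucket linear 50 100" > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 *   echo start > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 *   cat /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 *   echo stop > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 */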
4351 
4352 /*
4353  * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
4354  */
4355 
4356 /*
4357  * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
4358  * for each target.
4359  */
4360 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4361 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4362 	STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
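/*
 * Worked example of the size calculation above: each bucket counter is
 * printed as "%010u," (11 bytes), the WWN header is 16 hex digits plus a
 * ':' (17 bytes) and each record ends with a newline, giving
 * STAT_DATA_SIZE_PER_TARGET(N) = N * 11 + 18 bytes per target.
 */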
4363 
4364 
4365 /**
4366  * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
4367  * @filp: sysfs file
4368  * @kobj: Pointer to the kernel object
4369  * @bin_attr: Attribute object
4370  * @buf: Buffer pointer
4371  * @off: File offset
4372  * @count: Buffer size
4373  *
 * This function is the read callback function for the lpfc_drvr_stat_data
 * sysfs file. This function exports the statistical data to user
 * applications.
4377  **/
4378 static ssize_t
sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t off, size_t count)
4382 {
4383 	struct device *dev = container_of(kobj, struct device,
4384 		kobj);
4385 	struct Scsi_Host  *shost = class_to_shost(dev);
4386 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4387 	struct lpfc_hba   *phba = vport->phba;
4388 	int i = 0, index = 0;
4389 	unsigned long nport_index;
4390 	struct lpfc_nodelist *ndlp = NULL;
4391 	nport_index = (unsigned long)off /
4392 		MAX_STAT_DATA_SIZE_PER_TARGET;
4393 
4394 	if (!vport->stat_data_enabled || vport->stat_data_blocked
4395 		|| (phba->bucket_type == LPFC_NO_BUCKET))
4396 		return 0;
4397 
4398 	spin_lock_irq(shost->host_lock);
4399 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4400 		if (!ndlp->lat_data)
4401 			continue;
4402 
4403 		if (nport_index > 0) {
4404 			nport_index--;
4405 			continue;
4406 		}
4407 
4408 		if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4409 			> count)
4410 			break;
4411 
4412 		if (!ndlp->lat_data)
4413 			continue;
4414 
4415 		/* Print the WWN */
4416 		sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4417 			ndlp->nlp_portname.u.wwn[0],
4418 			ndlp->nlp_portname.u.wwn[1],
4419 			ndlp->nlp_portname.u.wwn[2],
4420 			ndlp->nlp_portname.u.wwn[3],
4421 			ndlp->nlp_portname.u.wwn[4],
4422 			ndlp->nlp_portname.u.wwn[5],
4423 			ndlp->nlp_portname.u.wwn[6],
4424 			ndlp->nlp_portname.u.wwn[7]);
4425 
4426 		index = strlen(buf);
4427 
4428 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4429 			sprintf(&buf[index], "%010u,",
4430 				ndlp->lat_data[i].cmd_count);
4431 			index = strlen(buf);
4432 		}
4433 		sprintf(&buf[index], "\n");
4434 		index = strlen(buf);
4435 	}
4436 	spin_unlock_irq(shost->host_lock);
4437 	return index;
4438 }
4439 
4440 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4441 	.attr = {
4442 		.name = "lpfc_drvr_stat_data",
4443 		.mode = S_IRUSR,
4444 	},
4445 	.size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4446 	.read = sysfs_drvr_stat_data_read,
4447 	.write = NULL,
4448 };
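/*
 * Note on the read interface, based on sysfs_drvr_stat_data_read() above:
 * the file offset selects the target, i.e. a read at offset
 * n * MAX_STAT_DATA_SIZE_PER_TARGET returns the record for the n-th
 * remote port that has latency data, so userspace is expected to read in
 * MAX_STAT_DATA_SIZE_PER_TARGET sized chunks.
 */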
4449 
4450 /*
4451 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
4452 # connection.
4453 # Value range is [0,16]. Default value is 0.
4454 */
4455 /**
4456  * lpfc_link_speed_store - Set the adapters link speed
4457  * @dev: Pointer to class device.
4458  * @attr: Unused.
4459  * @buf: Data buffer.
4460  * @count: Size of the data buffer.
4461  *
4462  * Description:
4463  * If val is in a valid range then set the adapter's link speed field and
4464  * issue a lip; if the lip fails reset the link speed to the old value.
4465  *
4466  * Notes:
4467  * If the value is not in range log a kernel error message and return an error.
4468  *
4469  * Returns:
4470  * zero if val is in range and lip okay.
4471  * non-zero return value from lpfc_issue_lip()
4472  * -EINVAL val out of range
4473  **/
4474 static ssize_t
lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
4477 {
4478 	struct Scsi_Host  *shost = class_to_shost(dev);
4479 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4480 	struct lpfc_hba   *phba = vport->phba;
4481 	int val = LPFC_USER_LINK_SPEED_AUTO;
4482 	int nolip = 0;
4483 	const char *val_buf = buf;
4484 	int err;
4485 	uint32_t prev_val, if_type;
4486 
4487 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4488 	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4489 	    phba->hba_flag & HBA_FORCED_LINK_SPEED)
4490 		return -EPERM;
4491 
4492 	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4493 		nolip = 1;
4494 		val_buf = &buf[strlen("nolip ")];
4495 	}
4496 
4497 	if (!isdigit(val_buf[0]))
4498 		return -EINVAL;
4499 	if (sscanf(val_buf, "%i", &val) != 1)
4500 		return -EINVAL;
4501 
4502 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4503 		"3055 lpfc_link_speed changed from %d to %d %s\n",
4504 		phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4505 
4506 	if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4507 	    ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4508 	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4509 	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4510 	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4511 	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4512 	    ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4513 	    ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4514 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4515 				"2879 lpfc_link_speed attribute cannot be set "
4516 				"to %d. Speed is not supported by this port.\n",
4517 				val);
4518 		return -EINVAL;
4519 	}
4520 	if (val >= LPFC_USER_LINK_SPEED_16G &&
4521 	    phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4522 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4523 				"3112 lpfc_link_speed attribute cannot be set "
4524 				"to %d. Speed is not supported in loop mode.\n",
4525 				val);
4526 		return -EINVAL;
4527 	}
4528 
4529 	switch (val) {
4530 	case LPFC_USER_LINK_SPEED_AUTO:
4531 	case LPFC_USER_LINK_SPEED_1G:
4532 	case LPFC_USER_LINK_SPEED_2G:
4533 	case LPFC_USER_LINK_SPEED_4G:
4534 	case LPFC_USER_LINK_SPEED_8G:
4535 	case LPFC_USER_LINK_SPEED_16G:
4536 	case LPFC_USER_LINK_SPEED_32G:
4537 	case LPFC_USER_LINK_SPEED_64G:
4538 		prev_val = phba->cfg_link_speed;
4539 		phba->cfg_link_speed = val;
4540 		if (nolip)
4541 			return strlen(buf);
4542 
4543 		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4544 		if (err) {
4545 			phba->cfg_link_speed = prev_val;
4546 			return -EINVAL;
4547 		}
4548 		return strlen(buf);
4549 	default:
4550 		break;
4551 	}
4552 
4553 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4554 			"0469 lpfc_link_speed attribute cannot be set to %d, "
4555 			"allowed values are [%s]\n",
4556 			val, LPFC_LINK_SPEED_STRING);
4557 	return -EINVAL;
4558 
4559 }
4560 
4561 static int lpfc_link_speed = 0;
4562 module_param(lpfc_link_speed, int, S_IRUGO);
4563 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
4565 
4566 /**
4567  * lpfc_link_speed_init - Set the adapters link speed
4568  * @phba: lpfc_hba pointer.
4569  * @val: link speed value.
4570  *
4571  * Description:
4572  * If val is in a valid range then set the adapter's link speed field.
4573  *
4574  * Notes:
4575  * If the value is not in range log a kernel error message, clear the link
4576  * speed and return an error.
4577  *
4578  * Returns:
4579  * zero if val saved.
4580  * -EINVAL val out of range
4581  **/
4582 static int
4583 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4584 {
4585 	if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4586 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4587 			"3111 lpfc_link_speed of %d cannot "
4588 			"support loop mode, setting topology to default.\n",
4589 			 val);
4590 		phba->cfg_topology = 0;
4591 	}
4592 
4593 	switch (val) {
4594 	case LPFC_USER_LINK_SPEED_AUTO:
4595 	case LPFC_USER_LINK_SPEED_1G:
4596 	case LPFC_USER_LINK_SPEED_2G:
4597 	case LPFC_USER_LINK_SPEED_4G:
4598 	case LPFC_USER_LINK_SPEED_8G:
4599 	case LPFC_USER_LINK_SPEED_16G:
4600 	case LPFC_USER_LINK_SPEED_32G:
4601 	case LPFC_USER_LINK_SPEED_64G:
4602 		phba->cfg_link_speed = val;
4603 		return 0;
4604 	default:
4605 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4606 				"0405 lpfc_link_speed attribute cannot "
4607 				"be set to %d, allowed values are "
4608 				"["LPFC_LINK_SPEED_STRING"]\n", val);
4609 		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4610 		return -EINVAL;
4611 	}
4612 }
4613 
4614 static DEVICE_ATTR_RW(lpfc_link_speed);
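/*
 * Illustrative usage sketch (host number is a placeholder): the value is
 * one of the speeds accepted by the switch in lpfc_link_speed_store()
 * (0 = auto, otherwise a supported Gb speed); the "nolip " prefix stores
 * the value without issuing a LIP:
 *
 *   echo 16 > /sys/class/scsi_host/hostN/lpfc_link_speed
 *   echo "nolip 8" > /sys/class/scsi_host/hostN/lpfc_link_speed
 */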
4615 
4616 /*
4617 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
4618 #       0  = aer disabled or not supported
4619 #       1  = aer supported and enabled (default)
4620 # Value range is [0,1]. Default value is 1.
4621 */
4622 LPFC_ATTR(aer_support, 1, 0, 1,
4623 	"Enable PCIe device AER support");
lpfc_param_show(aer_support)
4625 
4626 /**
4627  * lpfc_aer_support_store - Set the adapter for aer support
4628  *
4629  * @dev: class device that is converted into a Scsi_host.
4630  * @attr: device attribute, not used.
4631  * @buf: containing enable or disable aer flag.
4632  * @count: unused variable.
4633  *
4634  * Description:
4635  * If the val is 1 and currently the device's AER capability was not
4636  * enabled, invoke the kernel's enable AER helper routine, trying to
4637  * enable the device's AER capability. If the helper routine enabling
4638  * AER returns success, update the device's cfg_aer_support flag to
4639  * indicate AER is supported by the device; otherwise, if the device
4640  * AER capability is already enabled to support AER, then do nothing.
4641  *
4642  * If the val is 0 and currently the device's AER support was enabled,
4643  * invoke the kernel's disable AER helper routine. After that, update
4644  * the device's cfg_aer_support flag to indicate AER is not supported
4645  * by the device; otherwise, if the device AER capability is already
4646  * disabled from supporting AER, then do nothing.
4647  *
4648  * Returns:
4649  * length of the buf on success if val is in range the intended mode
4650  * is supported.
4651  * -EINVAL if val out of range or intended mode is not supported.
4652  **/
4653 static ssize_t
4654 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4655 		       const char *buf, size_t count)
4656 {
4657 	struct Scsi_Host *shost = class_to_shost(dev);
4658 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4659 	struct lpfc_hba *phba = vport->phba;
4660 	int val = 0, rc = -EINVAL;
4661 
4662 	if (!isdigit(buf[0]))
4663 		return -EINVAL;
4664 	if (sscanf(buf, "%i", &val) != 1)
4665 		return -EINVAL;
4666 
4667 	switch (val) {
4668 	case 0:
4669 		if (phba->hba_flag & HBA_AER_ENABLED) {
4670 			rc = pci_disable_pcie_error_reporting(phba->pcidev);
4671 			if (!rc) {
4672 				spin_lock_irq(&phba->hbalock);
4673 				phba->hba_flag &= ~HBA_AER_ENABLED;
4674 				spin_unlock_irq(&phba->hbalock);
4675 				phba->cfg_aer_support = 0;
4676 				rc = strlen(buf);
4677 			} else
4678 				rc = -EPERM;
4679 		} else {
4680 			phba->cfg_aer_support = 0;
4681 			rc = strlen(buf);
4682 		}
4683 		break;
4684 	case 1:
4685 		if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4686 			rc = pci_enable_pcie_error_reporting(phba->pcidev);
4687 			if (!rc) {
4688 				spin_lock_irq(&phba->hbalock);
4689 				phba->hba_flag |= HBA_AER_ENABLED;
4690 				spin_unlock_irq(&phba->hbalock);
4691 				phba->cfg_aer_support = 1;
4692 				rc = strlen(buf);
4693 			} else
4694 				 rc = -EPERM;
4695 		} else {
4696 			phba->cfg_aer_support = 1;
4697 			rc = strlen(buf);
4698 		}
4699 		break;
4700 	default:
4701 		rc = -EINVAL;
4702 		break;
4703 	}
4704 	return rc;
4705 }
4706 
4707 static DEVICE_ATTR_RW(lpfc_aer_support);
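/*
 * Illustrative sysfs usage (a sketch; "hostN" stands for the SCSI host number
 * assigned to the adapter):
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_aer_support    # enable AER
 *   echo 0 > /sys/class/scsi_host/hostN/lpfc_aer_support    # disable AER
 * A successful write returns the length of the input; -EPERM means the PCI
 * core could not change the device's AER state.
 */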
4708 
4709 /**
4710  * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
4711  * @dev: class device that is converted into a Scsi_host.
4712  * @attr: device attribute, not used.
4713  * @buf: containing flag 1 for aer cleanup state.
4714  * @count: unused variable.
4715  *
4716  * Description:
4717  * If the @buf contains 1 and the device currently has the AER support
4718  * enabled, then invokes the kernel AER helper routine
4719  * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
4720  * error status register.
4721  *
4722  * Notes:
4723  *
4724  * Returns:
4725  * -EINVAL if the buf does not contain the 1 or the device is not currently
4726  * enabled with the AER support.
4727  **/
4728 static ssize_t
4729 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4730 		       const char *buf, size_t count)
4731 {
4732 	struct Scsi_Host  *shost = class_to_shost(dev);
4733 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4734 	struct lpfc_hba   *phba = vport->phba;
4735 	int val, rc = -1;
4736 
4737 	if (!isdigit(buf[0]))
4738 		return -EINVAL;
4739 	if (sscanf(buf, "%i", &val) != 1)
4740 		return -EINVAL;
4741 	if (val != 1)
4742 		return -EINVAL;
4743 
4744 	if (phba->hba_flag & HBA_AER_ENABLED)
4745 		rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4746 
4747 	if (rc == 0)
4748 		return strlen(buf);
4749 	else
4750 		return -EPERM;
4751 }
4752 
4753 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4754 		   lpfc_aer_cleanup_state);
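/*
 * Illustrative sysfs usage (a sketch; "hostN" is a placeholder). Only the
 * value 1 is accepted, and only while AER is enabled on the adapter:
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_aer_state_cleanup
 */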
4755 
4756 /**
4757  * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
4758  *
4759  * @dev: class device that is converted into a Scsi_host.
4760  * @attr: device attribute, not used.
4761  * @buf: string containing the number of VFs to be enabled.
4762  * @count: unused variable.
4763  *
4764  * Description:
4765  * When this API is called through the user sysfs interface, the driver shall
4766  * try to enable or disable SR-IOV virtual functions according to the
4767  * following:
4768  *
4769  * If no virtual functions have been enabled on the physical function,
4770  * the driver shall invoke the pci enable virtual function api trying
4771  * to enable the virtual functions. If the nr_vfn provided is greater
4772  * than the maximum supported, the maximum virtual function number will
4773  * be used for invoking the api; otherwise, the nr_vfn provided shall
4774  * be used for invoking the api. If the api call returned success, the
4775  * actual number of virtual functions enabled will be set to the driver
4776  * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
4777  * cfg_sriov_nr_virtfn remains zero.
4778  *
4779  * If non-zero virtual functions have already been enabled to the
4780  * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4781  * -EINVAL will be returned and the driver does nothing;
4782  *
4783  * If the nr_vfn provided is zero and non-zero virtual functions have
4784  * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4785  * disabling virtual function API shall be invoked to disable all the
4786  * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
4787  * zero. Otherwise, if no virtual functions have been enabled, do
4788  * nothing.
4789  *
4790  * Returns:
4791  * length of the buf on success, if val is in range and the intended
4792  * mode is supported.
4793  * -EINVAL if val out of range or intended mode is not supported.
4794  **/
4795 static ssize_t
4796 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4797 			 const char *buf, size_t count)
4798 {
4799 	struct Scsi_Host *shost = class_to_shost(dev);
4800 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4801 	struct lpfc_hba *phba = vport->phba;
4802 	struct pci_dev *pdev = phba->pcidev;
4803 	int val = 0, rc = -EINVAL;
4804 
4805 	/* Sanity check on user data */
4806 	if (!isdigit(buf[0]))
4807 		return -EINVAL;
4808 	if (sscanf(buf, "%i", &val) != 1)
4809 		return -EINVAL;
4810 	if (val < 0)
4811 		return -EINVAL;
4812 
4813 	/* Request disabling virtual functions */
4814 	if (val == 0) {
4815 		if (phba->cfg_sriov_nr_virtfn > 0) {
4816 			pci_disable_sriov(pdev);
4817 			phba->cfg_sriov_nr_virtfn = 0;
4818 		}
4819 		return strlen(buf);
4820 	}
4821 
4822 	/* Request enabling virtual functions */
4823 	if (phba->cfg_sriov_nr_virtfn > 0) {
4824 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4825 				"3018 There are %d virtual functions "
4826 				"enabled on physical function.\n",
4827 				phba->cfg_sriov_nr_virtfn);
4828 		return -EEXIST;
4829 	}
4830 
4831 	if (val <= LPFC_MAX_VFN_PER_PFN)
4832 		phba->cfg_sriov_nr_virtfn = val;
4833 	else {
4834 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4835 				"3019 Enabling %d virtual functions is not "
4836 				"allowed.\n", val);
4837 		return -EINVAL;
4838 	}
4839 
4840 	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4841 	if (rc) {
4842 		phba->cfg_sriov_nr_virtfn = 0;
4843 		rc = -EPERM;
4844 	} else
4845 		rc = strlen(buf);
4846 
4847 	return rc;
4848 }
4849 
4850 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4851 	"Enable PCIe device SR-IOV virtual fn");
4852 
4853 lpfc_param_show(sriov_nr_virtfn)
4854 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
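/*
 * Illustrative sysfs usage (a sketch; "hostN" and the VF count are
 * placeholders):
 *   echo 4 > /sys/class/scsi_host/hostN/lpfc_sriov_nr_virtfn   # enable 4 VFs
 *   echo 0 > /sys/class/scsi_host/hostN/lpfc_sriov_nr_virtfn   # disable VFs
 * -EEXIST is returned if virtual functions are already enabled; write 0 to
 * disable them before requesting a new count.
 */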
4855 
4856 /**
4857  * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
4858  *
4859  * @dev: class device that is converted into a Scsi_host.
4860  * @attr: device attribute, not used.
4861  * @buf: containing the flag 1 to request a firmware upgrade.
4862  * @count: unused variable.
4863  *
4864  * Description:
4865  * If the @buf contains 1, request a Linux generic firmware upgrade
4866  * through lpfc_sli4_request_firmware_update().
4867  *
4868  * Returns:
4869  * length of the buf on success; -EINVAL or -EPERM on failure.
4870  **/
4871 static ssize_t
4872 lpfc_request_firmware_upgrade_store(struct device *dev,
4873 				    struct device_attribute *attr,
4874 				    const char *buf, size_t count)
4875 {
4876 	struct Scsi_Host *shost = class_to_shost(dev);
4877 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4878 	struct lpfc_hba *phba = vport->phba;
4879 	int val = 0, rc;
4880 
4881 	/* Sanity check on user data */
4882 	if (!isdigit(buf[0]))
4883 		return -EINVAL;
4884 	if (sscanf(buf, "%i", &val) != 1)
4885 		return -EINVAL;
4886 	if (val != 1)
4887 		return -EINVAL;
4888 
4889 	rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
4890 	if (rc)
4891 		rc = -EPERM;
4892 	else
4893 		rc = strlen(buf);
4894 	return rc;
4895 }
4896 
4897 static int lpfc_req_fw_upgrade;
4898 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4899 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4900 lpfc_param_show(request_firmware_upgrade)
4901 
4902 /**
4903  * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
4904  * @phba: lpfc_hba pointer.
4905  * @val: 0 or 1.
4906  *
4907  * Description:
4908  * Set the initial Linux generic firmware upgrade enable or disable flag.
4909  *
4910  * Returns:
4911  * zero if val saved.
4912  * -EINVAL val out of range
4913  **/
4914 static int
4915 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4916 {
4917 	if (val >= 0 && val <= 1) {
4918 		phba->cfg_request_firmware_upgrade = val;
4919 		return 0;
4920 	}
4921 	return -EINVAL;
4922 }
4923 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4924 		   lpfc_request_firmware_upgrade_show,
4925 		   lpfc_request_firmware_upgrade_store);
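/*
 * Illustrative sysfs usage (a sketch; "hostN" is a placeholder). Writing 1
 * asks the driver to start its SLI4 firmware update path via
 * lpfc_sli4_request_firmware_update():
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_req_fw_upgrade
 */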
4926 
4927 /**
4928  * lpfc_force_rscn_store
4929  *
4930  * @dev: class device that is converted into a Scsi_host.
4931  * @attr: device attribute, not used.
4932  * @buf: unused string
4933  * @count: unused variable.
4934  *
4935  * Description:
4936  * Force the switch to send an RSCN to all other NPorts in our zone.
4937  * If we are direct connect pt2pt, build the RSCN command ourselves
4938  * and send to the other NPort. Not supported for private loop.
4939  *
4940  * Returns:
4941  * 0      - on success
4942  * -EIO   - if command is not sent
4943  **/
4944 static ssize_t
4945 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
4946 		      const char *buf, size_t count)
4947 {
4948 	struct Scsi_Host *shost = class_to_shost(dev);
4949 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4950 	int i;
4951 
4952 	i = lpfc_issue_els_rscn(vport, 0);
4953 	if (i)
4954 		return -EIO;
4955 	return strlen(buf);
4956 }
4957 
4958 /*
4959  * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
4960  * connected to  the HBA.
4961  *
4962  * Value range is any ASCII value
4963  */
4964 static int lpfc_force_rscn;
4965 module_param(lpfc_force_rscn, int, 0644);
4966 MODULE_PARM_DESC(lpfc_force_rscn,
4967 		 "Force an RSCN to be sent to all remote NPorts");
4968 lpfc_param_show(force_rscn)
4969 
4970 /**
4971  * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
4972  * @phba: lpfc_hba pointer.
4973  * @val: unused value.
4974  *
4975  * Returns:
4976  * zero if val saved.
4977  **/
4978 static int
4979 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4980 {
4981 	return 0;
4982 }
4983 static DEVICE_ATTR_RW(lpfc_force_rscn);
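/*
 * Illustrative sysfs usage (a sketch; "hostN" is a placeholder). Any
 * successfully parsed write triggers lpfc_issue_els_rscn() on the port:
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_force_rscn
 */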
4984 
4985 /**
4986  * lpfc_fcp_imax_store
4987  *
4988  * @dev: class device that is converted into a Scsi_host.
4989  * @attr: device attribute, not used.
4990  * @buf: string with the number of fast-path FCP interrupts per second.
4991  * @count: unused variable.
4992  *
4993  * Description:
4994  * If val is in the valid range [5000,5000000] (or 0), then set the adapter's
4995  * maximum number of fast-path FCP interrupts per second.
4996  *
4997  * Returns:
4998  * length of the buf on success, if val is in range and the intended
4999  * mode is supported.
5000  * -EINVAL if val out of range or intended mode is not supported.
5001  **/
5002 static ssize_t
5003 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5004 			 const char *buf, size_t count)
5005 {
5006 	struct Scsi_Host *shost = class_to_shost(dev);
5007 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5008 	struct lpfc_hba *phba = vport->phba;
5009 	struct lpfc_eq_intr_info *eqi;
5010 	uint32_t usdelay;
5011 	int val = 0, i;
5012 
5013 	/* fcp_imax is only valid for SLI4 */
5014 	if (phba->sli_rev != LPFC_SLI_REV4)
5015 		return -EINVAL;
5016 
5017 	/* Sanity check on user data */
5018 	if (!isdigit(buf[0]))
5019 		return -EINVAL;
5020 	if (sscanf(buf, "%i", &val) != 1)
5021 		return -EINVAL;
5022 
5023 	/*
5024 	 * Value range for the HBA is [5000,5000000]
5025 	 * The value for each EQ depends on how many EQs are configured.
5026 	 * Allow value == 0
5027 	 */
5028 	if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5029 		return -EINVAL;
5030 
5031 	phba->cfg_auto_imax = (val) ? 0 : 1;
5032 	if (phba->cfg_fcp_imax && !val) {
5033 		queue_delayed_work(phba->wq, &phba->eq_delay_work,
5034 				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5035 
5036 		for_each_present_cpu(i) {
5037 			eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5038 			eqi->icnt = 0;
5039 		}
5040 	}
5041 
5042 	phba->cfg_fcp_imax = (uint32_t)val;
5043 
5044 	if (phba->cfg_fcp_imax)
5045 		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5046 	else
5047 		usdelay = 0;
5048 
5049 	for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5050 		lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5051 					 usdelay);
5052 
5053 	return strlen(buf);
5054 }
5055 
5056 /*
5057 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5058 # for the HBA.
5059 #
5060 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
5061 */
5062 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5063 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5064 MODULE_PARM_DESC(lpfc_fcp_imax,
5065 	    "Set the maximum number of FCP interrupts per second per HBA");
5066 lpfc_param_show(fcp_imax)
5067 
5068 /**
5069  * lpfc_fcp_imax_init - Set the initial FCP interrupt rate limit
5070  * @phba: lpfc_hba pointer.
5071  * @val: maximum number of fast-path FCP interrupts per second.
5072  *
5073  * Description:
5074  * If val is in the valid range [5000,5000000] or is 0, then initialize
5075  * the adapter's maximum number of fast-path FCP interrupts per second.
5076  *
5077  * Returns:
5078  * zero if val saved.
5079  * -EINVAL val out of range
5080  **/
5081 static int
5082 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5083 {
5084 	if (phba->sli_rev != LPFC_SLI_REV4) {
5085 		phba->cfg_fcp_imax = 0;
5086 		return 0;
5087 	}
5088 
5089 	if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5090 	    (val == 0)) {
5091 		phba->cfg_fcp_imax = val;
5092 		return 0;
5093 	}
5094 
5095 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5096 			"3016 lpfc_fcp_imax: %d out of range, using default\n",
5097 			val);
5098 	phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5099 
5100 	return 0;
5101 }
5102 
5103 static DEVICE_ATTR_RW(lpfc_fcp_imax);
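/*
 * Worked example (a sketch; "hostN" is a placeholder, and LPFC_SEC_TO_USEC is
 * assumed to represent one second in microseconds): writing 50000 caps each
 * EQ at 50,000 interrupts per second, i.e. an EQ delay of
 * 1000000 / 50000 = 20 usec. Writing 0 restores automatic EQ delay
 * management (cfg_auto_imax):
 *   echo 50000 > /sys/class/scsi_host/hostN/lpfc_fcp_imax
 *   echo 0     > /sys/class/scsi_host/hostN/lpfc_fcp_imax
 */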
5104 
5105 /**
5106  * lpfc_cq_max_proc_limit_store
5107  *
5108  * @dev: class device that is converted into a Scsi_host.
5109  * @attr: device attribute, not used.
5110  * @buf: string with the cq max processing limit of cqes
5111  * @count: unused variable.
5112  *
5113  * Description:
5114  * If val is in a valid range, then set value on each cq
5115  *
5116  * Returns:
5117  * The length of the buf: if successful
5118  * -ERANGE: if val is not in the valid range
5119  * -EINVAL: if bad value format or intended mode is not supported.
5120  **/
5121 static ssize_t
5122 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5123 			 const char *buf, size_t count)
5124 {
5125 	struct Scsi_Host *shost = class_to_shost(dev);
5126 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5127 	struct lpfc_hba *phba = vport->phba;
5128 	struct lpfc_queue *eq, *cq;
5129 	unsigned long val;
5130 	int i;
5131 
5132 	/* cq_max_proc_limit is only valid for SLI4 */
5133 	if (phba->sli_rev != LPFC_SLI_REV4)
5134 		return -EINVAL;
5135 
5136 	/* Sanity check on user data */
5137 	if (!isdigit(buf[0]))
5138 		return -EINVAL;
5139 	if (kstrtoul(buf, 0, &val))
5140 		return -EINVAL;
5141 
5142 	if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5143 		return -ERANGE;
5144 
5145 	phba->cfg_cq_max_proc_limit = (uint32_t)val;
5146 
5147 	/* set the values on the cq's */
5148 	for (i = 0; i < phba->cfg_irq_chann; i++) {
5149 		/* Get the EQ corresponding to the IRQ vector */
5150 		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5151 		if (!eq)
5152 			continue;
5153 
5154 		list_for_each_entry(cq, &eq->child_list, list)
5155 			cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5156 						 cq->entry_count);
5157 	}
5158 
5159 	return strlen(buf);
5160 }
5161 
5162 /*
5163  * lpfc_cq_max_proc_limit: The maximum number of CQEs processed in an
5164  *   iteration of CQ processing.
5165  */
5166 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5167 module_param(lpfc_cq_max_proc_limit, int, 0644);
5168 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5169 	    "Set the maximum number of CQEs processed in an iteration of "
5170 	    "CQ processing");
5171 lpfc_param_show(cq_max_proc_limit)
5172 
5173 /*
5174  * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
5175  *   single handler call which should request a polled completion rather
5176  *   than re-enabling interrupts.
5177  */
5178 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5179 	     LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5180 	     LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5181 	     "CQE Processing Threshold to enable Polling");
5182 
5183 /**
5184  * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
5185  * @phba: lpfc_hba pointer.
5186  * @val: entry limit
5187  *
5188  * Description:
5189  * If val is in a valid range, then initialize the adapter's maximum
5190  * value.
5191  *
5192  * Returns:
5193  *  Always returns 0 for success, even if value not always set to
5194  *  requested value. If value out of range or not supported, will fall
5195  *  back to default.
5196  **/
5197 static int
5198 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5199 {
5200 	phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5201 
5202 	if (phba->sli_rev != LPFC_SLI_REV4)
5203 		return 0;
5204 
5205 	if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5206 		phba->cfg_cq_max_proc_limit = val;
5207 		return 0;
5208 	}
5209 
5210 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5211 			"0371 lpfc_cq_max_proc_limit: %d out of range, using "
5212 			"default\n",
5213 			phba->cfg_cq_max_proc_limit);
5214 
5215 	return 0;
5216 }
5217 
5218 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
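/*
 * Illustrative sysfs usage (a sketch; "hostN" is a placeholder). The new
 * limit is applied to every child CQ immediately, clamped to each CQ's
 * entry_count:
 *   echo 128 > /sys/class/scsi_host/hostN/lpfc_cq_max_proc_limit
 */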
5219 
5220 /**
5221  * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
5222  * @dev: class converted to a Scsi_host structure.
5223  * @attr: device attribute, not used.
5224  * @buf: on return contains text describing the CPU-to-IRQ-vector mapping.
5225  *
5226  * Returns: size of formatted string.
5227  **/
5228 static ssize_t
5229 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5230 		      char *buf)
5231 {
5232 	struct Scsi_Host  *shost = class_to_shost(dev);
5233 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5234 	struct lpfc_hba   *phba = vport->phba;
5235 	struct lpfc_vector_map_info *cpup;
5236 	int  len = 0;
5237 
5238 	if ((phba->sli_rev != LPFC_SLI_REV4) ||
5239 	    (phba->intr_type != MSIX))
5240 		return len;
5241 
5242 	switch (phba->cfg_fcp_cpu_map) {
5243 	case 0:
5244 		len += scnprintf(buf + len, PAGE_SIZE-len,
5245 				"fcp_cpu_map: No mapping (%d)\n",
5246 				phba->cfg_fcp_cpu_map);
5247 		return len;
5248 	case 1:
5249 		len += scnprintf(buf + len, PAGE_SIZE-len,
5250 				"fcp_cpu_map: HBA centric mapping (%d): "
5251 				"%d of %d CPUs online from %d possible CPUs\n",
5252 				phba->cfg_fcp_cpu_map, num_online_cpus(),
5253 				num_present_cpus(),
5254 				phba->sli4_hba.num_possible_cpu);
5255 		break;
5256 	}
5257 
5258 	while (phba->sli4_hba.curr_disp_cpu <
5259 	       phba->sli4_hba.num_possible_cpu) {
5260 		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5261 
5262 		if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5263 			len += scnprintf(buf + len, PAGE_SIZE - len,
5264 					"CPU %02d not present\n",
5265 					phba->sli4_hba.curr_disp_cpu);
5266 		else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5267 			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5268 				len += scnprintf(
5269 					buf + len, PAGE_SIZE - len,
5270 					"CPU %02d hdwq None "
5271 					"physid %d coreid %d ht %d ua %d\n",
5272 					phba->sli4_hba.curr_disp_cpu,
5273 					cpup->phys_id, cpup->core_id,
5274 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5275 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5276 			else
5277 				len += scnprintf(
5278 					buf + len, PAGE_SIZE - len,
5279 					"CPU %02d EQ None hdwq %04d "
5280 					"physid %d coreid %d ht %d ua %d\n",
5281 					phba->sli4_hba.curr_disp_cpu,
5282 					cpup->hdwq, cpup->phys_id,
5283 					cpup->core_id,
5284 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5285 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5286 		} else {
5287 			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5288 				len += scnprintf(
5289 					buf + len, PAGE_SIZE - len,
5290 					"CPU %02d hdwq None "
5291 					"physid %d coreid %d ht %d ua %d IRQ %d\n",
5292 					phba->sli4_hba.curr_disp_cpu,
5293 					cpup->phys_id,
5294 					cpup->core_id,
5295 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5296 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5297 					lpfc_get_irq(cpup->eq));
5298 			else
5299 				len += scnprintf(
5300 					buf + len, PAGE_SIZE - len,
5301 					"CPU %02d EQ %04d hdwq %04d "
5302 					"physid %d coreid %d ht %d ua %d IRQ %d\n",
5303 					phba->sli4_hba.curr_disp_cpu,
5304 					cpup->eq, cpup->hdwq, cpup->phys_id,
5305 					cpup->core_id,
5306 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5307 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5308 					lpfc_get_irq(cpup->eq));
5309 		}
5310 
5311 		phba->sli4_hba.curr_disp_cpu++;
5312 
5313 		/* display max number of CPUs keeping some margin */
5314 		if (phba->sli4_hba.curr_disp_cpu <
5315 				phba->sli4_hba.num_possible_cpu &&
5316 				(len >= (PAGE_SIZE - 64))) {
5317 			len += scnprintf(buf + len,
5318 					PAGE_SIZE - len, "more...\n");
5319 			break;
5320 		}
5321 	}
5322 
5323 	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5324 		phba->sli4_hba.curr_disp_cpu = 0;
5325 
5326 	return len;
5327 }
5328 
5329 /**
5330  * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5331  * @dev: class device that is converted into a Scsi_host.
5332  * @attr: device attribute, not used.
5333  * @buf: unused; writes to this attribute are not supported.
5334  * @count: not used.
5335  *
5336  * Returns:
5337  * -EINVAL  - Not implemented yet.
5338  **/
5339 static ssize_t
5340 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5341 		       const char *buf, size_t count)
5342 {
5343 	return -EINVAL;
5344 }
5345 
5346 /*
5347 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5348 # for the HBA.
5349 #
5350 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
5351 #	0 - Do not affinitize IRQ vectors
5352 #	1 - Affinitize HBA vectors with respect to each HBA
5353 #	    (start with CPU0 for each HBA)
5354 # This also defines how Hardware Queues are mapped to specific CPUs.
5355 */
5356 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5357 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5358 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5359 		 "Defines how to map CPUs to IRQ vectors per HBA");
5360 
5361 /**
5362  * lpfc_fcp_cpu_map_init - Set the initial CPU-to-IRQ-vector mapping mode
5363  * @phba: lpfc_hba pointer.
5364  * @val: CPU mapping mode value.
5365  *
5366  * Description:
5367  * If val is in the valid range [0,1], then affinitize the adapter's
5368  * MSIX vectors.
5369  *
5370  * Returns:
5371  * zero if val saved.
5372  * -EINVAL val out of range
5373  **/
5374 static int
5375 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5376 {
5377 	if (phba->sli_rev != LPFC_SLI_REV4) {
5378 		phba->cfg_fcp_cpu_map = 0;
5379 		return 0;
5380 	}
5381 
5382 	if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5383 		phba->cfg_fcp_cpu_map = val;
5384 		return 0;
5385 	}
5386 
5387 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5388 			"3326 lpfc_fcp_cpu_map: %d out of range, using "
5389 			"default\n", val);
5390 	phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5391 
5392 	return 0;
5393 }
5394 
5395 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
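/*
 * Illustrative usage (a sketch; "hostN" is a placeholder). The store handler
 * above returns -EINVAL, so the mapping can only be read at runtime and is
 * configured at module load time:
 *   cat /sys/class/scsi_host/hostN/lpfc_fcp_cpu_map
 *   modprobe lpfc lpfc_fcp_cpu_map=1
 */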
5396 
5397 /*
5398 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
5399 # Value range is [2,3]. Default value is 3.
5400 */
5401 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5402 		  "Select Fibre Channel class of service for FCP sequences");
5403 
5404 /*
5405 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5406 # is [0,1]. Default value is 0.
5407 */
5408 LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
5409 		   "Use ADISC on rediscovery to authenticate FCP devices");
5410 
5411 /*
5412 # lpfc_first_burst_size: First burst size to use on the NPorts
5413 # that support first burst.
5414 # Value range is [0,65536]. Default value is 0.
5415 */
5416 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5417 		   "First burst size for Targets that support first burst");
5418 
5419 /*
5420 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5421 * When the driver is configured as an NVME target, this value is
5422 * communicated to the NVME initiator in the PRLI response.  It is
5423 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
5424 * parameters are set and the target is sending the PRLI RSP.
5425 * Parameter supported on physical port only - no NPIV support.
5426 * Value range is [0,65536]. Default value is 0.
5427 */
5428 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5429 	     "NVME Target mode first burst size in 512B increments.");
5430 
5431 /*
5432  * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
5433  * For the Initiator (I), enabling this parameter means that an NVMET
5434  * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
5435  * processed by the initiator for subsequent NVME FCP IO.
5436  * Currently, this feature is not supported on the NVME target
5437  * Value range is [0,1]. Default value is 0 (disabled).
5438  */
5439 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5440 	     "Enable First Burst feature for NVME Initiator.");
5441 
5442 /*
5443 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
5444 # depth. Default value is 0. When the value of this parameter is zero the
5445 # SCSI command completion time is not used for controlling I/O queue depth. When
5446 # the parameter is set to a non-zero value, the I/O queue depth is controlled
5447 # to limit the I/O completion time to the parameter value.
5448 # The value is set in milliseconds.
5449 */
5450 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5451 	"Use command completion time to control queue depth");
5452 
5453 lpfc_vport_param_show(max_scsicmpl_time);
5454 static int
5455 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5456 {
5457 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5458 	struct lpfc_nodelist *ndlp, *next_ndlp;
5459 
5460 	if (val == vport->cfg_max_scsicmpl_time)
5461 		return 0;
5462 	if ((val < 0) || (val > 60000))
5463 		return -EINVAL;
5464 	vport->cfg_max_scsicmpl_time = val;
5465 
5466 	spin_lock_irq(shost->host_lock);
5467 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5468 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5469 			continue;
5470 		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5471 	}
5472 	spin_unlock_irq(shost->host_lock);
5473 	return 0;
5474 }
5475 lpfc_vport_param_store(max_scsicmpl_time);
5476 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5477 
5478 /*
5479 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
5480 # range is [0,1]. Default value is 0.
5481 */
5482 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5483 
5484 /*
5485 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
5486 # range is [0,1]. Default value is 1.
5487 */
5488 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5489 
5490 /*
5491  * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
5492  * range is [0,1]. Default value is 0.
5493  * For [0], FCP commands are issued to Work Queues based on upper layer
5494  * hardware queue index.
5495  * For [1], FCP commands are issued to a Work Queue associated with the
5496  *          current CPU.
5497  *
5498  * LPFC_FCP_SCHED_BY_HDWQ == 0
5499  * LPFC_FCP_SCHED_BY_CPU == 1
5500  *
5501  * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
5502  * affinity for FCP/NVME I/Os through Work Queues associated with the current
5503  * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
5504  * through WQs will be used.
5505  */
5506 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5507 	     LPFC_FCP_SCHED_BY_HDWQ,
5508 	     LPFC_FCP_SCHED_BY_CPU,
5509 	     "Determine scheduling algorithm for "
5510 	     "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5511 
5512 /*
5513  * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5514  * range is [0,1]. Default value is 0.
5515  * For [0], GID_FT is used for NameServer queries after RSCN (default)
5516  * For [1], GID_PT is used for NameServer queries after RSCN
5517  *
5518  */
5519 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5520 	     LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5521 	     "Determine algorithm NameServer queries after RSCN "
5522 	     "[0] - GID_FT, [1] - GID_PT");
5523 
5524 /*
5525 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5526 # range is [0,1]. Default value is 0.
5527 # For [0], bus reset issues target reset to ALL devices
5528 # For [1], bus reset issues target reset to non-FCP2 devices
5529 */
5530 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5531 	     "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5532 
5533 
5534 /*
5535 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5536 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5537 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5538 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5539 # cr_delay is set to 0.
5540 */
5541 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5542 		"interrupt response is generated");
5543 
5544 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5545 		"interrupt response is generated");
5546 
5547 /*
5548 # lpfc_multi_ring_support:  Determines how many rings to spread available
5549 # cmd/rsp IOCB entries across.
5550 # Value range is [1,2]. Default value is 1.
5551 */
5552 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5553 		"SLI rings to spread IOCB entries across");
5554 
5555 /*
5556 # lpfc_multi_ring_rctl:  If lpfc_multi_ring_support is enabled, this
5557 # identifies what rctl value to configure the additional ring for.
5558 # Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5559 */
5560 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5561 	     255, "Identifies RCTL for additional ring configuration");
5562 
5563 /*
5564 # lpfc_multi_ring_type:  If lpfc_multi_ring_support is enabled, this
5565 # identifies what type value to configure the additional ring for.
5566 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5567 */
5568 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5569 	     255, "Identifies TYPE for additional ring configuration");
5570 
5571 /*
5572 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5573 #       0  = SmartSAN functionality disabled (default)
5574 #       1  = SmartSAN functionality enabled
5575 # This parameter will override the value of lpfc_fdmi_on module parameter.
5576 # Value range is [0,1]. Default value is 0.
5577 */
5578 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5579 
5580 /*
5581 # lpfc_fdmi_on: Controls FDMI support.
5582 #       0       No FDMI support
5583 #       1       Traditional FDMI support (default)
5584 # Traditional FDMI support means the driver will assume FDMI-2 support;
5585 # however, if that fails, it will fallback to FDMI-1.
5586 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5587 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5588 # lpfc_fdmi_on.
5589 # Value range [0,1]. Default value is 1.
5590 */
5591 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5592 
5593 /*
5594 # Specifies the maximum number of ELS cmds we can have outstanding (for
5595 # discovery). Value range is [1,64]. Default value = 32.
5596 */
5597 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5598 		 "during discovery");
5599 
5600 /*
5601 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
5602 #    will be scanned by the SCSI midlayer when sequential scanning is
5603 #    used; and is also the highest LUN ID allowed when the SCSI midlayer
5604 #    parses REPORT_LUN responses. The lpfc driver has no LUN count or
5605 #    LUN ID limit, but the SCSI midlayer requires this field for the uses
5606 #    above. The lpfc driver limits the default value to 255 for two reasons.
5607 #    As it bounds the sequential scan loop, scanning for thousands of luns
5608 #    on a target can take minutes of wall clock time.  Additionally,
5609 #    there are FC targets, such as JBODs, that only recognize 8-bits of
5610 #    LUN ID. When they receive a value greater than 8 bits, they chop off
5611 #    the high order bits. In other words, they see LUN IDs 0, 256, 512,
5612 #    and so on all as LUN ID 0. This causes the linux kernel, which sees
5613 #    valid responses at each of the LUN IDs, to believe there are multiple
5614 #    devices present, when in fact, there is only 1.
5615 #    A customer that is aware of their target behaviors, and the results as
5616 #    indicated above, is welcome to increase the lpfc_max_luns value.
5617 #    As mentioned, this value is not used by the lpfc driver, only the
5618 #    SCSI midlayer.
5619 # Value range is [0,65535]. Default value is 255.
5620 # NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
5621 */
5622 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5623 
5624 /*
5625 # lpfc_poll_tmo: Milliseconds the driver will wait between polling the FCP ring.
5626 # Value range is [1,255], default value is 10.
5627 */
5628 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5629 	     "Milliseconds driver will wait between polling FCP ring");
5630 
5631 /*
5632 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
5633 # to complete in seconds. Value range is [5,180], default value is 60.
5634 */
5635 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5636 	     "Maximum time to wait for task management commands to complete");
5637 /*
5638 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
5639 #		support this feature
5640 #       0  = MSI disabled
5641 #       1  = MSI enabled
5642 #       2  = MSI-X enabled (default)
5643 # Value range is [0,2]. Default value is 2.
5644 */
5645 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5646 	    "MSI-X (2), if possible");
5647 
5648 /*
5649  * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
5650  *
5651  *      0  = NVME OAS disabled
5652  *      1  = NVME OAS enabled
5653  *
5654  * Value range is [0,1]. Default value is 0.
5655  */
5656 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5657 	     "Use OAS bit on NVME IOs");
5658 
5659 /*
5660  * lpfc_nvme_embed_cmd: Embed the NVME Command in the WQE when sending NVME/NVMET IOs
5661  *
5662  *      0  = Put NVME Command in SGL
5663  *      1  = Embed NVME Command in WQE (unless G7)
5664  *      2 =  Embed NVME Command in WQE (force)
5665  *
5666  * Value range is [0,2]. Default value is 1.
5667  */
5668 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5669 	     "Embed NVME Command in WQE");
5670 
5671 /*
5672  * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5673  * the driver will advertise it supports to the SCSI layer.
5674  *
5675  *      0    = Set nr_hw_queues by the number of CPUs or HW queues.
5676  *      1,256 = Manually specify nr_hw_queue value to be advertised,
5677  *
5678  * Value range is [0,256]. Default value is 8.
5679  */
5680 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5681 	    LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5682 	    "Set the number of SCSI Queues advertised");
5683 
5684 /*
5685  * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5686  * will advertise it supports to the NVME and  SCSI layers. This also
5687  * will map to the number of CQ/WQ pairs the driver will create.
5688  *
5689  * The NVME Layer will try to create this many, plus 1 administrative
5690  * hardware queue. The administrative queue will always map to WQ 0
5691  * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
5692  *
5693  *      0    = Configure the number of hdw queues to the number of active CPUs.
5694  *      1,256 = Manually specify how many hdw queues to use.
5695  *
5696  * Value range is [0,256]. Default value is 0.
5697  */
5698 LPFC_ATTR_R(hdw_queue,
5699 	    LPFC_HBA_HDWQ_DEF,
5700 	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5701 	    "Set the number of I/O Hardware Queues");
5702 
5703 #if IS_ENABLED(CONFIG_X86)
5704 /**
5705  * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
5706  *				irq_chann_mode
5707  * @phba: Pointer to HBA context object.
5708  **/
5709 static void
5710 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5711 {
5712 	unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5713 	const struct cpumask *sibling_mask;
5714 	struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5715 
5716 	cpumask_clear(aff_mask);
5717 
5718 	if (phba->irq_chann_mode == NUMA_MODE) {
5719 		/* Check if we're a NUMA architecture */
5720 		numa_node = dev_to_node(&phba->pcidev->dev);
5721 		if (numa_node == NUMA_NO_NODE) {
5722 			phba->irq_chann_mode = NORMAL_MODE;
5723 			return;
5724 		}
5725 	}
5726 
5727 	for_each_possible_cpu(cpu) {
5728 		switch (phba->irq_chann_mode) {
5729 		case NUMA_MODE:
5730 			if (cpu_to_node(cpu) == numa_node)
5731 				cpumask_set_cpu(cpu, aff_mask);
5732 			break;
5733 		case NHT_MODE:
5734 			sibling_mask = topology_sibling_cpumask(cpu);
5735 			first_cpu = cpumask_first(sibling_mask);
5736 			if (first_cpu < nr_cpu_ids)
5737 				cpumask_set_cpu(first_cpu, aff_mask);
5738 			break;
5739 		default:
5740 			break;
5741 		}
5742 	}
5743 }
5744 #endif
5745 
5746 static void
5747 lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5748 {
5749 #if IS_ENABLED(CONFIG_X86)
5750 	switch (boot_cpu_data.x86_vendor) {
5751 	case X86_VENDOR_AMD:
5752 		/* If AMD architecture, then default is NUMA_MODE */
5753 		phba->irq_chann_mode = NUMA_MODE;
5754 		break;
5755 	case X86_VENDOR_INTEL:
5756 		/* If Intel architecture, then default is no hyperthread mode */
5757 		phba->irq_chann_mode = NHT_MODE;
5758 		break;
5759 	default:
5760 		phba->irq_chann_mode = NORMAL_MODE;
5761 		break;
5762 	}
5763 	lpfc_cpumask_irq_mode_init(phba);
5764 #else
5765 	phba->irq_chann_mode = NORMAL_MODE;
5766 #endif
5767 }
5768 
5769 /*
5770  * lpfc_irq_chann: Set the number of IRQ vectors that are available
5771  * for Hardware Queues to utilize.  This also will map to the number
5772  * of EQ / MSI-X vectors the driver will create. This should never be
5773  * more than the number of Hardware Queues
5774  *
5775  *	0		= Configure number of IRQ Channels to:
5776  *			  if AMD architecture, number of CPUs on HBA's NUMA node
5777  *			  if Intel architecture, number of physical CPUs.
5778  *			  otherwise, number of active CPUs.
5779  *	[1,256]		= Manually specify how many IRQ Channels to use.
5780  *
5781  * Value range is [0,256]. Default value is [0].
5782  */
5783 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5784 module_param(lpfc_irq_chann, uint, 0444);
5785 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
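/*
 * Illustrative usage (a sketch): lpfc_irq_chann is a 0444 module parameter,
 * so it can only be set at load time, for example:
 *   modprobe lpfc lpfc_irq_chann=16
 * Leaving it at 0 lets lpfc_irq_chann_init() choose a default based on the
 * platform's irq_chann_mode.
 */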
5786 
5787 /* lpfc_irq_chann_init - Set the hba irq_chann initial value
5788  * @phba: lpfc_hba pointer.
5789  * @val: contains the initial value
5790  *
5791  * Description:
5792  * Validates the initial value is within range and assigns it to the
5793  * adapter. If not in range, an error message is posted and the
5794  * default value is assigned.
5795  *
5796  * Returns:
5797  * zero if value is in range and is set
5798  * -EINVAL if value was out of range
5799  **/
5800 static int
5801 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
5802 {
5803 	const struct cpumask *aff_mask;
5804 
5805 	if (phba->cfg_use_msi != 2) {
5806 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5807 				"8532 use_msi = %u ignoring cfg_irq_numa\n",
5808 				phba->cfg_use_msi);
5809 		phba->irq_chann_mode = NORMAL_MODE;
5810 		phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5811 		return 0;
5812 	}
5813 
5814 	/* Check if default setting was passed */
5815 	if (val == LPFC_IRQ_CHANN_DEF &&
5816 	    phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF &&
5817 	    phba->sli_rev == LPFC_SLI_REV4)
5818 		lpfc_assign_default_irq_chann(phba);
5819 
5820 	if (phba->irq_chann_mode != NORMAL_MODE) {
5821 		aff_mask = &phba->sli4_hba.irq_aff_mask;
5822 
5823 		if (cpumask_empty(aff_mask)) {
5824 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5825 					"8533 Could not identify CPUS for "
5826 					"mode %d, ignoring\n",
5827 					phba->irq_chann_mode);
5828 			phba->irq_chann_mode = NORMAL_MODE;
5829 			phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5830 		} else {
5831 			phba->cfg_irq_chann = cpumask_weight(aff_mask);
5832 
5833 			/* If no hyperthread mode, then set hdwq count to
5834 			 * aff_mask weight as well
5835 			 */
5836 			if (phba->irq_chann_mode == NHT_MODE)
5837 				phba->cfg_hdw_queue = phba->cfg_irq_chann;
5838 
5839 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5840 					"8543 lpfc_irq_chann set to %u "
5841 					"(mode: %d)\n", phba->cfg_irq_chann,
5842 					phba->irq_chann_mode);
5843 		}
5844 	} else {
5845 		if (val > LPFC_IRQ_CHANN_MAX) {
5846 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5847 					"8545 lpfc_irq_chann attribute cannot "
5848 					"be set to %u, allowed range is "
5849 					"[%u,%u]\n",
5850 					val,
5851 					LPFC_IRQ_CHANN_MIN,
5852 					LPFC_IRQ_CHANN_MAX);
5853 			phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5854 			return -EINVAL;
5855 		}
5856 		if (phba->sli_rev == LPFC_SLI_REV4) {
5857 			phba->cfg_irq_chann = val;
5858 		} else {
5859 			phba->cfg_irq_chann = 2;
5860 			phba->cfg_hdw_queue = 1;
5861 		}
5862 	}
5863 
5864 	return 0;
5865 }
5866 
5867 /**
5868  * lpfc_irq_chann_show - Display value of irq_chann
5869  * @dev: class converted to a Scsi_host structure.
5870  * @attr: device attribute, not used.
5871  * @buf: on return contains the configured number of IRQ channels
5872  *
5873  * Returns: size of formatted string.
5874  **/
5875 static ssize_t
5876 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
5877 		    char *buf)
5878 {
5879 	struct Scsi_Host *shost = class_to_shost(dev);
5880 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5881 	struct lpfc_hba *phba = vport->phba;
5882 
5883 	return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
5884 }
5885 
5886 static DEVICE_ATTR_RO(lpfc_irq_chann);
5887 
5888 /*
5889 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
5890 #       0  = HBA resets disabled
5891 #       1  = HBA resets enabled (default)
5892 #       2  = HBA reset via PCI bus reset enabled
5893 # Value range is [0,2]. Default value is 1.
5894 */
5895 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5896 
5897 /*
5898 # lpfc_enable_hba_heartbeat: Enable the HBA heartbeat timer.
5899 #       0  = HBA Heartbeat disabled (default)
5900 #       1  = HBA Heartbeat enabled
5901 # Value range is [0,1]. Default value is 0.
5902 */
5903 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
5904 
5905 /*
5906 # lpfc_EnableXLane: Enable Express Lane Feature
5907 #      0x0   Express Lane Feature disabled
5908 #      0x1   Express Lane Feature enabled
5909 # Value range is [0,1]. Default value is 0.
5910 */
5911 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
5912 
5913 /*
5914 # lpfc_XLanePriority:  Define CS_CTL priority for Express Lane Feature
5915 #       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
5916 # Value range is [0x0,0x7f]. Default value is 0
5917 */
5918 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
5919 
5920 /*
5921 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
5922 #       0  = BlockGuard disabled (default)
5923 #       1  = BlockGuard enabled
5924 # Value range is [0,1]. Default value is 0.
5925 */
5926 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
5927 
5928 /*
5929 # lpfc_prot_mask:
5930 #	- Bit mask of host protection capabilities used to register with the
5931 #	  SCSI mid-layer
5932 # 	- Only meaningful if BG is turned on (lpfc_enable_bg=1).
5933 #	- Allows you to ultimately specify which profiles to use
5934 #	- Default will result in registering capabilities for all profiles.
5935 #	- SHOST_DIF_TYPE1_PROTECTION	1
5936 #		HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
5937 #	- SHOST_DIX_TYPE0_PROTECTION	8
5938 #		HBA supports DIX Type 0: Host to HBA protection only
5939 #	- SHOST_DIX_TYPE1_PROTECTION	16
5940 #		HBA supports DIX Type 1: Host to HBA  Type 1 protection
5941 #
5942 */
5943 LPFC_ATTR(prot_mask,
5944 	(SHOST_DIF_TYPE1_PROTECTION |
5945 	SHOST_DIX_TYPE0_PROTECTION |
5946 	SHOST_DIX_TYPE1_PROTECTION),
5947 	0,
5948 	(SHOST_DIF_TYPE1_PROTECTION |
5949 	SHOST_DIX_TYPE0_PROTECTION |
5950 	SHOST_DIX_TYPE1_PROTECTION),
5951 	"T10-DIF host protection capabilities mask");
5952 
5953 /*
5954 # lpfc_prot_guard:
5955 #	- Bit mask of protection guard types to register with the SCSI mid-layer
5956 #	- Guard types are currently either 1) T10-DIF CRC 2) IP checksum
5957 #	- Allows you to ultimately specify which profiles to use
5958 #	- Default will result in registering capabilities for all guard types
5959 #
5960 */
5961 LPFC_ATTR(prot_guard,
5962 	SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
5963 	"T10-DIF host protection guard type");
5964 
5965 /*
5966  * Delay initial NPort discovery when Clean Address bit is cleared in
5967  * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
5968  * This parameter can have value 0 or 1.
5969  * When this parameter is set to 0, no delay is added to the initial
5970  * discovery.
5971  * When this parameter is set to a non-zero value, initial NPort discovery is
5972  * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
5973  * accept and FCID/Fabric name/Fabric portname is changed.
5974  * The driver always delays NPort discovery for subsequent FLOGI/FDISC completion
5975  * when Clean Address bit is cleared in FLOGI/FDISC
5976  * accept and FCID/Fabric name/Fabric portname is changed.
5977  * Default value is 0.
5978  */
5979 LPFC_ATTR(delay_discovery, 0, 0, 1,
5980 	"Delay NPort discovery when Clean Address bit is cleared.");
5981 
5982 /*
5983  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
5984  * This value can be set to values between 64 and 4096. The default value
5985  * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
5986  * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
5987  * Because of the additional overhead involved in setting up T10-DIF,
5988  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
5989  * and will be limited to 512 if BlockGuard is enabled under SLI3.
5990  */
5991 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
5992 module_param(lpfc_sg_seg_cnt, uint, 0444);
5993 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
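/*
 * Illustrative usage (a sketch): lpfc_sg_seg_cnt is a 0444 module parameter
 * and is therefore set at load time, for example:
 *   modprobe lpfc lpfc_sg_seg_cnt=256
 * Out-of-range values are rejected by lpfc_sg_seg_cnt_init() and the default
 * LPFC_DEFAULT_SG_SEG_CNT is used instead.
 */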
5994 
5995 /**
5996  * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
5997  *    configured for the adapter
5998  * @dev: class converted to a Scsi_host structure.
5999  * @attr: device attribute, not used.
6000  * @buf: on return contains a string with the list sizes
6001  *
6002  * Returns: size of formatted string.
6003  **/
6004 static ssize_t
6005 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
6006 		     char *buf)
6007 {
6008 	struct Scsi_Host  *shost = class_to_shost(dev);
6009 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6010 	struct lpfc_hba   *phba = vport->phba;
6011 	int len;
6012 
6013 	len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
6014 		       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
6015 
6016 	len += scnprintf(buf + len, PAGE_SIZE - len, "Cfg: %d  SCSI: %d  NVME: %d\n",
6017 			phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
6018 			phba->cfg_nvme_seg_cnt);
6019 	return len;
6020 }
6021 
6022 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
6023 
6024 /**
6025  * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
6026  * @phba: lpfc_hba pointer.
6027  * @val: contains the initial value
6028  *
6029  * Description:
6030  * Validates the initial value is within range and assigns it to the
6031  * adapter. If not in range, an error message is posted and the
6032  * default value is assigned.
6033  *
6034  * Returns:
6035  * zero if value is in range and is set
6036  * -EINVAL if value was out of range
6037  **/
6038 static int
6039 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6040 {
6041 	if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6042 		phba->cfg_sg_seg_cnt = val;
6043 		return 0;
6044 	}
6045 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6046 			"0409 lpfc_sg_seg_cnt attribute cannot be set to %d, "
6047 			"allowed range is [%d, %d]\n",
6048 			val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6049 	phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6050 	return -EINVAL;
6051 }
6052 
6053 /*
6054  * lpfc_enable_mds_diags: Enable MDS Diagnostics
6055  *       0  = MDS Diagnostics disabled (default)
6056  *       1  = MDS Diagnostics enabled
6057  * Value range is [0,1]. Default value is 0.
6058  */
6059 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6060 
6061 /*
6062  * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6063  *	0 = Disable firmware logging (default)
6064  *	[1-4] = Multiple of 1/4th MB of host memory for FW logging
6065  * Value range [0..4]. Default value is 0
6066  */
6067 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6068 lpfc_param_show(ras_fwlog_buffsize);
6069 
6070 static ssize_t
6071 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba  *phba, uint val)
6072 {
6073 	int ret = 0;
6074 	enum ras_state state;
6075 
6076 	if (!lpfc_rangecheck(val, 0, 4))
6077 		return -EINVAL;
6078 
6079 	if (phba->cfg_ras_fwlog_buffsize == val)
6080 		return 0;
6081 
6082 	if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6083 		return -EINVAL;
6084 
6085 	spin_lock_irq(&phba->hbalock);
6086 	state = phba->ras_fwlog.state;
6087 	spin_unlock_irq(&phba->hbalock);
6088 
6089 	if (state == REG_INPROGRESS) {
6090 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6091 				"registration is in progress\n");
6092 		return -EBUSY;
6093 	}
6094 
6095 	/* For disable logging: stop the logs and free the DMA.
6096 	 * For ras_fwlog_buffsize size change we still need to free and
6097 	 * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
6098 	 */
6099 	phba->cfg_ras_fwlog_buffsize = val;
6100 	if (state == ACTIVE) {
6101 		lpfc_ras_stop_fwlog(phba);
6102 		lpfc_sli4_ras_dma_free(phba);
6103 	}
6104 
6105 	lpfc_sli4_ras_init(phba);
6106 	if (phba->ras_fwlog.ras_enabled)
6107 		ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6108 					       LPFC_RAS_ENABLE_LOGGING);
6109 	return ret;
6110 }
6111 
6112 lpfc_param_store(ras_fwlog_buffsize);
6113 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
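/*
 * Illustrative sysfs usage (a sketch; "hostN" is a placeholder). Each unit
 * buys a quarter megabyte of host memory for firmware logging, so 4 allocates
 * 1 MB; writing 0 stops logging and frees the DMA buffer:
 *   echo 4 > /sys/class/scsi_host/hostN/lpfc_ras_fwlog_buffsize
 *   echo 0 > /sys/class/scsi_host/hostN/lpfc_ras_fwlog_buffsize
 */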
6114 
6115 /*
6116  * lpfc_ras_fwlog_level: Firmware logging verbosity level
6117  * Valid only if firmware logging is enabled
6118  * 0 (least verbosity) to 4 (most verbosity)
6119  * Value range is [0..4]. Default value is 0
6120  */
6121 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6122 
6123 /*
6124  * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6125  * Default function which has RAS support : 0
6126  * Value Range is [0..7].
6127  * FW logging is a global action and enablement is via a specific
6128  * port.
6129  */
6130 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6131 
6132 /*
6133  * lpfc_enable_bbcr: Enable BB Credit Recovery
6134  *       0  = BB Credit Recovery disabled
6135  *       1  = BB Credit Recovery enabled (default)
6136  * Value range is [0,1]. Default value is 1.
6137  */
6138 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6139 
6140 /*
6141  * lpfc_enable_dpp: Enable DPP on G7
6142  *       0  = DPP on G7 disabled
6143  *       1  = DPP on G7 enabled (default)
6144  * Value range is [0,1]. Default value is 1.
6145  */
6146 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6147 
6148 /*
6149  * lpfc_enable_mi: Enable FDMI MIB
6150  *       0  = disabled
6151  *       1  = enabled (default)
6152  * Value range is [0,1].
6153  */
6154 LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
6155 
6156 struct device_attribute *lpfc_hba_attrs[] = {
6157 	&dev_attr_nvme_info,
6158 	&dev_attr_scsi_stat,
6159 	&dev_attr_bg_info,
6160 	&dev_attr_bg_guard_err,
6161 	&dev_attr_bg_apptag_err,
6162 	&dev_attr_bg_reftag_err,
6163 	&dev_attr_info,
6164 	&dev_attr_serialnum,
6165 	&dev_attr_modeldesc,
6166 	&dev_attr_modelname,
6167 	&dev_attr_programtype,
6168 	&dev_attr_portnum,
6169 	&dev_attr_fwrev,
6170 	&dev_attr_hdw,
6171 	&dev_attr_option_rom_version,
6172 	&dev_attr_link_state,
6173 	&dev_attr_num_discovered_ports,
6174 	&dev_attr_menlo_mgmt_mode,
6175 	&dev_attr_lpfc_drvr_version,
6176 	&dev_attr_lpfc_enable_fip,
6177 	&dev_attr_lpfc_temp_sensor,
6178 	&dev_attr_lpfc_log_verbose,
6179 	&dev_attr_lpfc_lun_queue_depth,
6180 	&dev_attr_lpfc_tgt_queue_depth,
6181 	&dev_attr_lpfc_hba_queue_depth,
6182 	&dev_attr_lpfc_peer_port_login,
6183 	&dev_attr_lpfc_nodev_tmo,
6184 	&dev_attr_lpfc_devloss_tmo,
6185 	&dev_attr_lpfc_enable_fc4_type,
6186 	&dev_attr_lpfc_fcp_class,
6187 	&dev_attr_lpfc_use_adisc,
6188 	&dev_attr_lpfc_first_burst_size,
6189 	&dev_attr_lpfc_ack0,
6190 	&dev_attr_lpfc_xri_rebalancing,
6191 	&dev_attr_lpfc_topology,
6192 	&dev_attr_lpfc_scan_down,
6193 	&dev_attr_lpfc_link_speed,
6194 	&dev_attr_lpfc_fcp_io_sched,
6195 	&dev_attr_lpfc_ns_query,
6196 	&dev_attr_lpfc_fcp2_no_tgt_reset,
6197 	&dev_attr_lpfc_cr_delay,
6198 	&dev_attr_lpfc_cr_count,
6199 	&dev_attr_lpfc_multi_ring_support,
6200 	&dev_attr_lpfc_multi_ring_rctl,
6201 	&dev_attr_lpfc_multi_ring_type,
6202 	&dev_attr_lpfc_fdmi_on,
6203 	&dev_attr_lpfc_enable_SmartSAN,
6204 	&dev_attr_lpfc_max_luns,
6205 	&dev_attr_lpfc_enable_npiv,
6206 	&dev_attr_lpfc_fcf_failover_policy,
6207 	&dev_attr_lpfc_enable_rrq,
6208 	&dev_attr_nport_evt_cnt,
6209 	&dev_attr_board_mode,
6210 	&dev_attr_max_vpi,
6211 	&dev_attr_used_vpi,
6212 	&dev_attr_max_rpi,
6213 	&dev_attr_used_rpi,
6214 	&dev_attr_max_xri,
6215 	&dev_attr_used_xri,
6216 	&dev_attr_npiv_info,
6217 	&dev_attr_issue_reset,
6218 	&dev_attr_lpfc_poll,
6219 	&dev_attr_lpfc_poll_tmo,
6220 	&dev_attr_lpfc_task_mgmt_tmo,
6221 	&dev_attr_lpfc_use_msi,
6222 	&dev_attr_lpfc_nvme_oas,
6223 	&dev_attr_lpfc_nvme_embed_cmd,
6224 	&dev_attr_lpfc_fcp_imax,
6225 	&dev_attr_lpfc_force_rscn,
6226 	&dev_attr_lpfc_cq_poll_threshold,
6227 	&dev_attr_lpfc_cq_max_proc_limit,
6228 	&dev_attr_lpfc_fcp_cpu_map,
6229 	&dev_attr_lpfc_fcp_mq_threshold,
6230 	&dev_attr_lpfc_hdw_queue,
6231 	&dev_attr_lpfc_irq_chann,
6232 	&dev_attr_lpfc_suppress_rsp,
6233 	&dev_attr_lpfc_nvmet_mrq,
6234 	&dev_attr_lpfc_nvmet_mrq_post,
6235 	&dev_attr_lpfc_nvme_enable_fb,
6236 	&dev_attr_lpfc_nvmet_fb_size,
6237 	&dev_attr_lpfc_enable_bg,
6238 	&dev_attr_lpfc_soft_wwnn,
6239 	&dev_attr_lpfc_soft_wwpn,
6240 	&dev_attr_lpfc_soft_wwn_enable,
6241 	&dev_attr_lpfc_enable_hba_reset,
6242 	&dev_attr_lpfc_enable_hba_heartbeat,
6243 	&dev_attr_lpfc_EnableXLane,
6244 	&dev_attr_lpfc_XLanePriority,
6245 	&dev_attr_lpfc_xlane_lun,
6246 	&dev_attr_lpfc_xlane_tgt,
6247 	&dev_attr_lpfc_xlane_vpt,
6248 	&dev_attr_lpfc_xlane_lun_state,
6249 	&dev_attr_lpfc_xlane_lun_status,
6250 	&dev_attr_lpfc_xlane_priority,
6251 	&dev_attr_lpfc_sg_seg_cnt,
6252 	&dev_attr_lpfc_max_scsicmpl_time,
6253 	&dev_attr_lpfc_stat_data_ctrl,
6254 	&dev_attr_lpfc_aer_support,
6255 	&dev_attr_lpfc_aer_state_cleanup,
6256 	&dev_attr_lpfc_sriov_nr_virtfn,
6257 	&dev_attr_lpfc_req_fw_upgrade,
6258 	&dev_attr_lpfc_suppress_link_up,
6259 	&dev_attr_iocb_hw,
6260 	&dev_attr_pls,
6261 	&dev_attr_pt,
6262 	&dev_attr_txq_hw,
6263 	&dev_attr_txcmplq_hw,
6264 	&dev_attr_lpfc_sriov_hw_max_virtfn,
6265 	&dev_attr_protocol,
6266 	&dev_attr_lpfc_xlane_supported,
6267 	&dev_attr_lpfc_enable_mds_diags,
6268 	&dev_attr_lpfc_ras_fwlog_buffsize,
6269 	&dev_attr_lpfc_ras_fwlog_level,
6270 	&dev_attr_lpfc_ras_fwlog_func,
6271 	&dev_attr_lpfc_enable_bbcr,
6272 	&dev_attr_lpfc_enable_dpp,
6273 	&dev_attr_lpfc_enable_mi,
6274 	NULL,
6275 };
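
/*
 * Illustrative note (not part of the original source): lpfc_hba_attrs above
 * and lpfc_vport_attrs below are NULL-terminated arrays that the driver
 * hands to the SCSI midlayer through its host templates, so each entry
 * appears as a file under /sys/class/scsi_host/hostN/, for example:
 *
 *   ls /sys/class/scsi_host/host0/ | grep '^lpfc_'
 */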
6276 
6277 struct device_attribute *lpfc_vport_attrs[] = {
6278 	&dev_attr_info,
6279 	&dev_attr_link_state,
6280 	&dev_attr_num_discovered_ports,
6281 	&dev_attr_lpfc_drvr_version,
6282 	&dev_attr_lpfc_log_verbose,
6283 	&dev_attr_lpfc_lun_queue_depth,
6284 	&dev_attr_lpfc_tgt_queue_depth,
6285 	&dev_attr_lpfc_nodev_tmo,
6286 	&dev_attr_lpfc_devloss_tmo,
6287 	&dev_attr_lpfc_hba_queue_depth,
6288 	&dev_attr_lpfc_peer_port_login,
6289 	&dev_attr_lpfc_restrict_login,
6290 	&dev_attr_lpfc_fcp_class,
6291 	&dev_attr_lpfc_use_adisc,
6292 	&dev_attr_lpfc_first_burst_size,
6293 	&dev_attr_lpfc_max_luns,
6294 	&dev_attr_nport_evt_cnt,
6295 	&dev_attr_npiv_info,
6296 	&dev_attr_lpfc_enable_da_id,
6297 	&dev_attr_lpfc_max_scsicmpl_time,
6298 	&dev_attr_lpfc_stat_data_ctrl,
6299 	&dev_attr_lpfc_static_vport,
6300 	NULL,
6301 };
6302 
6303 /**
6304  * sysfs_ctlreg_write - Write method for writing to ctlreg
6305  * @filp: open sysfs file
6306  * @kobj: kernel kobject that contains the kernel class device.
6307  * @bin_attr: kernel attributes passed to us.
6308  * @buf: contains the data to be written to the adapter IOREG space.
6309  * @off: offset into buffer to beginning of data.
6310  * @count: bytes to transfer.
6311  *
6312  * Description:
6313  * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6314  * Uses the adapter io control registers to send buf contents to the adapter.
6315  *
6316  * Returns:
6317  * -ERANGE off and count combination out of range
6318  * -EINVAL off, count or buf address invalid, or write key mismatch
6319  * -EPERM adapter is an SLI-4 adapter or is not offline
6320  * value of count, buf contents written
6321  **/
6322 static ssize_t
6323 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6324 		   struct bin_attribute *bin_attr,
6325 		   char *buf, loff_t off, size_t count)
6326 {
6327 	size_t buf_off;
6328 	struct device *dev = container_of(kobj, struct device, kobj);
6329 	struct Scsi_Host  *shost = class_to_shost(dev);
6330 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6331 	struct lpfc_hba   *phba = vport->phba;
6332 
6333 	if (phba->sli_rev >= LPFC_SLI_REV4)
6334 		return -EPERM;
6335 
6336 	if ((off + count) > FF_REG_AREA_SIZE)
6337 		return -ERANGE;
6338 
6339 	if (count <= LPFC_REG_WRITE_KEY_SIZE)
6340 		return 0;
6341 
6342 	if (off % 4 || count % 4 || (unsigned long)buf % 4)
6343 		return -EINVAL;
6344 
6345 	/* This is to protect HBA registers from accidental writes. */
6346 	if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6347 		return -EINVAL;
6348 
6349 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
6350 		return -EPERM;
6351 
6352 	spin_lock_irq(&phba->hbalock);
6353 	for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6354 			buf_off += sizeof(uint32_t))
6355 		writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6356 		       phba->ctrl_regs_memmap_p + off + buf_off);
6357 
6358 	spin_unlock_irq(&phba->hbalock);
6359 
6360 	return count;
6361 }
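
/*
 * Illustrative example (not part of the original source): a userspace write
 * to the ctlreg binary attribute must be a multiple of 4 bytes, must start
 * with LPFC_REG_WRITE_KEY ("EMLX"), and is only honored while an SLI-3 port
 * is offline.  The sysfs file offset selects the register offset; the host
 * number and register value below are examples only.
 *
 *   int fd = open("/sys/class/scsi_host/host0/ctlreg", O_WRONLY);
 *   char buf[8] = { 'E', 'M', 'L', 'X', 0x78, 0x56, 0x34, 0x12 };
 *   pwrite(fd, buf, sizeof(buf), 0);      writes one register word at offset 0
 *   close(fd);
 */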
6362 
6363 /**
6364  * sysfs_ctlreg_read - Read method for reading from ctlreg
6365  * @filp: open sysfs file
6366  * @kobj: kernel kobject that contains the kernel class device.
6367  * @bin_attr: kernel attributes passed to us.
6368  * @buf: if successful contains the data from the adapter IOREG space.
6369  * @off: offset into buffer to beginning of data.
6370  * @count: bytes to transfer.
6371  *
6372  * Description:
6373  * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6374  * Uses the adapter io control registers to read data into buf.
6375  *
6376  * Returns:
6377  * -ERANGE off and count combination out of range
6378  * -EINVAL off, count or buf address invalid
6379  * value of count (possibly truncated to the register area), buf contents read
6380  **/
6381 static ssize_t
6382 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6383 		  struct bin_attribute *bin_attr,
6384 		  char *buf, loff_t off, size_t count)
6385 {
6386 	size_t buf_off;
6387 	uint32_t *tmp_ptr;
6388 	struct device *dev = container_of(kobj, struct device, kobj);
6389 	struct Scsi_Host  *shost = class_to_shost(dev);
6390 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6391 	struct lpfc_hba   *phba = vport->phba;
6392 
6393 	if (phba->sli_rev >= LPFC_SLI_REV4)
6394 		return -EPERM;
6395 
6396 	if (off > FF_REG_AREA_SIZE)
6397 		return -ERANGE;
6398 
6399 	if ((off + count) > FF_REG_AREA_SIZE)
6400 		count = FF_REG_AREA_SIZE - off;
6401 
6402 	if (count == 0)
		return 0;
6403 
6404 	if (off % 4 || count % 4 || (unsigned long)buf % 4)
6405 		return -EINVAL;
6406 
6407 	spin_lock_irq(&phba->hbalock);
6408 
6409 	for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6410 		tmp_ptr = (uint32_t *)(buf + buf_off);
6411 		*tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6412 	}
6413 
6414 	spin_unlock_irq(&phba->hbalock);
6415 
6416 	return count;
6417 }
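
/*
 * Illustrative example (not part of the original source): reads from ctlreg
 * must also be 32-bit aligned, and a read extending past FF_REG_AREA_SIZE
 * is silently truncated to the end of the register area.  The host number
 * is an example only.
 *
 *   uint32_t regs[4];
 *   int fd = open("/sys/class/scsi_host/host0/ctlreg", O_RDONLY);
 *   ssize_t n = pread(fd, regs, sizeof(regs), 0);
 *   close(fd);
 */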
6418 
6419 static struct bin_attribute sysfs_ctlreg_attr = {
6420 	.attr = {
6421 		.name = "ctlreg",
6422 		.mode = S_IRUSR | S_IWUSR,
6423 	},
6424 	.size = 256,
6425 	.read = sysfs_ctlreg_read,
6426 	.write = sysfs_ctlreg_write,
6427 };
6428 
6429 /**
6430  * sysfs_mbox_write - Write method for writing information via mbox
6431  * @filp: open sysfs file
6432  * @kobj: kernel kobject that contains the kernel class device.
6433  * @bin_attr: kernel attributes passed to us.
6434  * @buf: contains the data to be written to sysfs mbox.
6435  * @off: offset into buffer to beginning of data.
6436  * @count: bytes to transfer.
6437  *
6438  * Description:
6439  * Deprecated function. All mailbox access from user space is performed via the
6440  * bsg interface.
6441  *
6442  * Returns:
6443  * -EPERM operation not permitted
6444  **/
6445 static ssize_t
6446 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6447 		 struct bin_attribute *bin_attr,
6448 		 char *buf, loff_t off, size_t count)
6449 {
6450 	return -EPERM;
6451 }
6452 
6453 /**
6454  * sysfs_mbox_read - Read method for reading information via mbox
6455  * @filp: open sysfs file
6456  * @kobj: kernel kobject that contains the kernel class device.
6457  * @bin_attr: kernel attributes passed to us.
6458  * @buf: contains the data to be read from sysfs mbox.
6459  * @off: offset into buffer to beginning of data.
6460  * @count: bytes to transfer.
6461  *
6462  * Description:
6463  * Deprecated function. All mailbox access from user space is performed via the
6464  * bsg interface.
6465  *
6466  * Returns:
6467  * -EPERM operation not permitted
6468  **/
6469 static ssize_t
6470 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6471 		struct bin_attribute *bin_attr,
6472 		char *buf, loff_t off, size_t count)
6473 {
6474 	return -EPERM;
6475 }
6476 
6477 static struct bin_attribute sysfs_mbox_attr = {
6478 	.attr = {
6479 		.name = "mbox",
6480 		.mode = S_IRUSR | S_IWUSR,
6481 	},
6482 	.size = MAILBOX_SYSFS_MAX,
6483 	.read = sysfs_mbox_read,
6484 	.write = sysfs_mbox_write,
6485 };
6486 
6487 /**
6488  * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
6489  * @vport: address of lpfc vport structure.
6490  *
6491  * Return codes:
6492  * zero on success
6493  * error return code from sysfs_create_bin_file()
6494  **/
6495 int
6496 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6497 {
6498 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6499 	int error;
6500 
6501 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6502 				      &sysfs_drvr_stat_data_attr);
6503 
6504 	/* Virtual ports do not need ctrl_reg and mbox */
6505 	if (error || vport->port_type == LPFC_NPIV_PORT)
6506 		goto out;
6507 
6508 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6509 				      &sysfs_ctlreg_attr);
6510 	if (error)
6511 		goto out_remove_stat_attr;
6512 
6513 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6514 				      &sysfs_mbox_attr);
6515 	if (error)
6516 		goto out_remove_ctlreg_attr;
6517 
6518 	return 0;
6519 out_remove_ctlreg_attr:
6520 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6521 out_remove_stat_attr:
6522 	sysfs_remove_bin_file(&shost->shost_dev.kobj,
6523 			&sysfs_drvr_stat_data_attr);
6524 out:
6525 	return error;
6526 }
6527 
6528 /**
6529  * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
6530  * @vport: address of lpfc vport structure.
6531  **/
6532 void
6533 lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6534 {
6535 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6536 	sysfs_remove_bin_file(&shost->shost_dev.kobj,
6537 		&sysfs_drvr_stat_data_attr);
6538 	/* Virtual ports do not need ctrl_reg and mbox */
6539 	if (vport->port_type == LPFC_NPIV_PORT)
6540 		return;
6541 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6542 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6543 }
6544 
6545 /*
6546  * Dynamic FC Host Attributes Support
6547  */
6548 
6549 /**
6550  * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
6551  * @shost: kernel scsi host pointer.
6552  **/
6553 static void
6554 lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6555 {
6556 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6557 
6558 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6559 				      sizeof fc_host_symbolic_name(shost));
6560 }
6561 
6562 /**
6563  * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
6564  * @shost: kernel scsi host pointer.
6565  **/
6566 static void
6567 lpfc_get_host_port_id(struct Scsi_Host *shost)
6568 {
6569 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6570 
6571 	/* note: fc_myDID already in cpu endianness */
6572 	fc_host_port_id(shost) = vport->fc_myDID;
6573 }
6574 
6575 /**
6576  * lpfc_get_host_port_type - Set the value of the scsi host port type
6577  * @shost: kernel scsi host pointer.
6578  **/
6579 static void
6580 lpfc_get_host_port_type(struct Scsi_Host *shost)
6581 {
6582 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6583 	struct lpfc_hba   *phba = vport->phba;
6584 
6585 	spin_lock_irq(shost->host_lock);
6586 
6587 	if (vport->port_type == LPFC_NPIV_PORT) {
6588 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6589 	} else if (lpfc_is_link_up(phba)) {
6590 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6591 			if (vport->fc_flag & FC_PUBLIC_LOOP)
6592 				fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6593 			else
6594 				fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6595 		} else {
6596 			if (vport->fc_flag & FC_FABRIC)
6597 				fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6598 			else
6599 				fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6600 		}
6601 	} else
6602 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6603 
6604 	spin_unlock_irq(shost->host_lock);
6605 }
6606 
6607 /**
6608  * lpfc_get_host_port_state - Set the value of the scsi host port state
6609  * @shost: kernel scsi host pointer.
6610  **/
6611 static void
6612 lpfc_get_host_port_state(struct Scsi_Host *shost)
6613 {
6614 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6615 	struct lpfc_hba   *phba = vport->phba;
6616 
6617 	spin_lock_irq(shost->host_lock);
6618 
6619 	if (vport->fc_flag & FC_OFFLINE_MODE)
6620 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6621 	else {
6622 		switch (phba->link_state) {
6623 		case LPFC_LINK_UNKNOWN:
6624 		case LPFC_LINK_DOWN:
6625 			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6626 			break;
6627 		case LPFC_LINK_UP:
6628 		case LPFC_CLEAR_LA:
6629 		case LPFC_HBA_READY:
6630 			/* Link is up, report port state accordingly */
6631 			if (vport->port_state < LPFC_VPORT_READY)
6632 				fc_host_port_state(shost) =
6633 							FC_PORTSTATE_BYPASSED;
6634 			else
6635 				fc_host_port_state(shost) =
6636 							FC_PORTSTATE_ONLINE;
6637 			break;
6638 		case LPFC_HBA_ERROR:
6639 			fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6640 			break;
6641 		default:
6642 			fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6643 			break;
6644 		}
6645 	}
6646 
6647 	spin_unlock_irq(shost->host_lock);
6648 }
6649 
6650 /**
6651  * lpfc_get_host_speed - Set the value of the scsi host speed
6652  * @shost: kernel scsi host pointer.
6653  **/
6654 static void
6655 lpfc_get_host_speed(struct Scsi_Host *shost)
6656 {
6657 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6658 	struct lpfc_hba   *phba = vport->phba;
6659 
6660 	spin_lock_irq(shost->host_lock);
6661 
6662 	if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6663 		switch (phba->fc_linkspeed) {
6664 		case LPFC_LINK_SPEED_1GHZ:
6665 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6666 			break;
6667 		case LPFC_LINK_SPEED_2GHZ:
6668 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6669 			break;
6670 		case LPFC_LINK_SPEED_4GHZ:
6671 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6672 			break;
6673 		case LPFC_LINK_SPEED_8GHZ:
6674 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6675 			break;
6676 		case LPFC_LINK_SPEED_10GHZ:
6677 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6678 			break;
6679 		case LPFC_LINK_SPEED_16GHZ:
6680 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6681 			break;
6682 		case LPFC_LINK_SPEED_32GHZ:
6683 			fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6684 			break;
6685 		case LPFC_LINK_SPEED_64GHZ:
6686 			fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6687 			break;
6688 		case LPFC_LINK_SPEED_128GHZ:
6689 			fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6690 			break;
6691 		default:
6692 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6693 			break;
6694 		}
6695 	} else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6696 		switch (phba->fc_linkspeed) {
6697 		case LPFC_ASYNC_LINK_SPEED_1GBPS:
6698 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6699 			break;
6700 		case LPFC_ASYNC_LINK_SPEED_10GBPS:
6701 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6702 			break;
6703 		case LPFC_ASYNC_LINK_SPEED_20GBPS:
6704 			fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6705 			break;
6706 		case LPFC_ASYNC_LINK_SPEED_25GBPS:
6707 			fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6708 			break;
6709 		case LPFC_ASYNC_LINK_SPEED_40GBPS:
6710 			fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6711 			break;
6712 		case LPFC_ASYNC_LINK_SPEED_100GBPS:
6713 			fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6714 			break;
6715 		default:
6716 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6717 			break;
6718 		}
6719 	} else
6720 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6721 
6722 	spin_unlock_irq(shost->host_lock);
6723 }
6724 
6725 /**
6726  * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
6727  * @shost: kernel scsi host pointer.
6728  **/
6729 static void
6730 lpfc_get_host_fabric_name(struct Scsi_Host *shost)
6731 {
6732 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6733 	struct lpfc_hba   *phba = vport->phba;
6734 	u64 node_name;
6735 
6736 	spin_lock_irq(shost->host_lock);
6737 
6738 	if ((vport->port_state > LPFC_FLOGI) &&
6739 	    ((vport->fc_flag & FC_FABRIC) ||
6740 	     ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6741 	      (vport->fc_flag & FC_PUBLIC_LOOP))))
6742 		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6743 	else
6744 		/* fabric is local port if there is no F/FL_Port */
6745 		node_name = 0;
6746 
6747 	spin_unlock_irq(shost->host_lock);
6748 
6749 	fc_host_fabric_name(shost) = node_name;
6750 }
6751 
6752 /**
6753  * lpfc_get_stats - Return statistical information about the adapter
6754  * @shost: kernel scsi host pointer.
6755  *
6756  * Notes:
6757  * Returns NULL on error: port not yet configured, no mailbox pool, SLI not
6758  * active, management I/O blocked, memory allocation error, or mailbox error.
6759  *
6760  * Returns:
6761  * NULL for error
6762  * address of the adapter host statistics
6763  **/
6764 static struct fc_host_statistics *
6765 lpfc_get_stats(struct Scsi_Host *shost)
6766 {
6767 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6768 	struct lpfc_hba   *phba = vport->phba;
6769 	struct lpfc_sli   *psli = &phba->sli;
6770 	struct fc_host_statistics *hs = &phba->link_stats;
6771 	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6772 	LPFC_MBOXQ_t *pmboxq;
6773 	MAILBOX_t *pmb;
6774 	int rc = 0;
6775 
6776 	/*
6777 	 * prevent udev from issuing mailbox commands until the port is
6778 	 * configured.
6779 	 */
6780 	if (phba->link_state < LPFC_LINK_DOWN ||
6781 	    !phba->mbox_mem_pool ||
6782 	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
6783 		return NULL;
6784 
6785 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6786 		return NULL;
6787 
6788 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6789 	if (!pmboxq)
6790 		return NULL;
6791 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6792 
6793 	pmb = &pmboxq->u.mb;
6794 	pmb->mbxCommand = MBX_READ_STATUS;
6795 	pmb->mbxOwner = OWN_HOST;
6796 	pmboxq->ctx_buf = NULL;
6797 	pmboxq->vport = vport;
6798 
6799 	if (vport->fc_flag & FC_OFFLINE_MODE) {
6800 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6801 		if (rc != MBX_SUCCESS) {
6802 			mempool_free(pmboxq, phba->mbox_mem_pool);
6803 			return NULL;
6804 		}
6805 	} else {
6806 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6807 		if (rc != MBX_SUCCESS) {
6808 			if (rc != MBX_TIMEOUT)
6809 				mempool_free(pmboxq, phba->mbox_mem_pool);
6810 			return NULL;
6811 		}
6812 	}
6813 
6814 	memset(hs, 0, sizeof (struct fc_host_statistics));
6815 
6816 	hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
6817 	/*
6818 	 * MBX_READ_STATUS returns the transmit and receive byte counts in
6819 	 * kilobytes, which have to be converted to 32-bit words (1 KB = 256 words).
6820 	 */
6821 	hs->tx_words = (uint64_t)
6822 			((uint64_t)pmb->un.varRdStatus.xmitByteCnt
6823 			* (uint64_t)256);
6824 	hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
6825 	hs->rx_words = (uint64_t)
6826 			((uint64_t)pmb->un.varRdStatus.rcvByteCnt
6827 			 * (uint64_t)256);
6828 
6829 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6830 	pmb->mbxCommand = MBX_READ_LNK_STAT;
6831 	pmb->mbxOwner = OWN_HOST;
6832 	pmboxq->ctx_buf = NULL;
6833 	pmboxq->vport = vport;
6834 
6835 	if (vport->fc_flag & FC_OFFLINE_MODE) {
6836 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6837 		if (rc != MBX_SUCCESS) {
6838 			mempool_free(pmboxq, phba->mbox_mem_pool);
6839 			return NULL;
6840 		}
6841 	} else {
6842 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6843 		if (rc != MBX_SUCCESS) {
6844 			if (rc != MBX_TIMEOUT)
6845 				mempool_free(pmboxq, phba->mbox_mem_pool);
6846 			return NULL;
6847 		}
6848 	}
6849 
6850 	hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6851 	hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6852 	hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6853 	hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6854 	hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6855 	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6856 	hs->error_frames = pmb->un.varRdLnk.crcCnt;
6857 
6858 	hs->link_failure_count -= lso->link_failure_count;
6859 	hs->loss_of_sync_count -= lso->loss_of_sync_count;
6860 	hs->loss_of_signal_count -= lso->loss_of_signal_count;
6861 	hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
6862 	hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
6863 	hs->invalid_crc_count -= lso->invalid_crc_count;
6864 	hs->error_frames -= lso->error_frames;
6865 
6866 	if (phba->hba_flag & HBA_FCOE_MODE) {
6867 		hs->lip_count = -1;
6868 		hs->nos_count = (phba->link_events >> 1);
6869 		hs->nos_count -= lso->link_events;
6870 	} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6871 		hs->lip_count = (phba->fc_eventTag >> 1);
6872 		hs->lip_count -= lso->link_events;
6873 		hs->nos_count = -1;
6874 	} else {
6875 		hs->lip_count = -1;
6876 		hs->nos_count = (phba->fc_eventTag >> 1);
6877 		hs->nos_count -= lso->link_events;
6878 	}
6879 
6880 	hs->dumped_frames = -1;
6881 
6882 	hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
6883 
6884 	mempool_free(pmboxq, phba->mbox_mem_pool);
6885 
6886 	return hs;
6887 }
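
/*
 * Illustrative note (not part of the original source): the counters filled
 * in by lpfc_get_stats() are exported by the FC transport class and can be
 * read from sysfs, e.g. (the host number is system dependent):
 *
 *   cat /sys/class/fc_host/host0/statistics/tx_frames
 *   cat /sys/class/fc_host/host0/statistics/link_failure_count
 */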
6888 
6889 /**
6890  * lpfc_reset_stats - Copy the adapter link stats information
6891  * @shost: kernel scsi host pointer.
6892  **/
6893 static void
6894 lpfc_reset_stats(struct Scsi_Host *shost)
6895 {
6896 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6897 	struct lpfc_hba   *phba = vport->phba;
6898 	struct lpfc_sli   *psli = &phba->sli;
6899 	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6900 	LPFC_MBOXQ_t *pmboxq;
6901 	MAILBOX_t *pmb;
6902 	int rc = 0;
6903 
6904 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6905 		return;
6906 
6907 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6908 	if (!pmboxq)
6909 		return;
6910 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6911 
6912 	pmb = &pmboxq->u.mb;
6913 	pmb->mbxCommand = MBX_READ_STATUS;
6914 	pmb->mbxOwner = OWN_HOST;
6915 	pmb->un.varWords[0] = 0x1; /* reset request */
6916 	pmboxq->ctx_buf = NULL;
6917 	pmboxq->vport = vport;
6918 
6919 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6920 		(!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6921 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6922 		if (rc != MBX_SUCCESS) {
6923 			mempool_free(pmboxq, phba->mbox_mem_pool);
6924 			return;
6925 		}
6926 	} else {
6927 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6928 		if (rc != MBX_SUCCESS) {
6929 			if (rc != MBX_TIMEOUT)
6930 				mempool_free(pmboxq, phba->mbox_mem_pool);
6931 			return;
6932 		}
6933 	}
6934 
6935 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6936 	pmb->mbxCommand = MBX_READ_LNK_STAT;
6937 	pmb->mbxOwner = OWN_HOST;
6938 	pmboxq->ctx_buf = NULL;
6939 	pmboxq->vport = vport;
6940 
6941 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6942 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6943 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6944 		if (rc != MBX_SUCCESS) {
6945 			mempool_free(pmboxq, phba->mbox_mem_pool);
6946 			return;
6947 		}
6948 	} else {
6949 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6950 		if (rc != MBX_SUCCESS) {
6951 			if (rc != MBX_TIMEOUT)
6952 				mempool_free(pmboxq, phba->mbox_mem_pool);
6953 			return;
6954 		}
6955 	}
6956 
6957 	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6958 	lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6959 	lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6960 	lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6961 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6962 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6963 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
6964 	if (phba->hba_flag & HBA_FCOE_MODE)
6965 		lso->link_events = (phba->link_events >> 1);
6966 	else
6967 		lso->link_events = (phba->fc_eventTag >> 1);
6968 
6969 	psli->stats_start = ktime_get_seconds();
6970 
6971 	mempool_free(pmboxq, phba->mbox_mem_pool);
6972 
6973 	return;
6974 }
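
/*
 * Illustrative note (not part of the original source): lpfc_reset_stats()
 * is invoked by the FC transport when userspace writes to the transport's
 * reset_statistics attribute, e.g. (host number is an example):
 *
 *   echo 1 > /sys/class/fc_host/host0/statistics/reset_statistics
 */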
6975 
6976 /*
6977  * The LPFC driver treats linkdown handling as target loss events so there
6978  * are no sysfs handlers for link_down_tmo.
6979  */
6980 
6981 /**
6982  * lpfc_get_node_by_target - Return the nodelist for a target
6983  * @starget: kernel scsi target pointer.
6984  *
6985  * Returns:
6986  * address of the node list if found
6987  * NULL target not found
6988  **/
6989 static struct lpfc_nodelist *
6990 lpfc_get_node_by_target(struct scsi_target *starget)
6991 {
6992 	struct Scsi_Host  *shost = dev_to_shost(starget->dev.parent);
6993 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6994 	struct lpfc_nodelist *ndlp;
6995 
6996 	spin_lock_irq(shost->host_lock);
6997 	/* Search for this mapped target ID */
6998 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6999 		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
7000 		    starget->id == ndlp->nlp_sid) {
7001 			spin_unlock_irq(shost->host_lock);
7002 			return ndlp;
7003 		}
7004 	}
7005 	spin_unlock_irq(shost->host_lock);
7006 	return NULL;
7007 }
7008 
7009 /**
7010  * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
7011  * @starget: kernel scsi target pointer.
7012  **/
7013 static void
7014 lpfc_get_starget_port_id(struct scsi_target *starget)
7015 {
7016 	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7017 
7018 	fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
7019 }
7020 
7021 /**
7022  * lpfc_get_starget_node_name - Set the target node name
7023  * @starget: kernel scsi target pointer.
7024  *
7025  * Description: Set the target node name to the ndlp node name wwn or zero.
7026  **/
7027 static void
7028 lpfc_get_starget_node_name(struct scsi_target *starget)
7029 {
7030 	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7031 
7032 	fc_starget_node_name(starget) =
7033 		ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7034 }
7035 
7036 /**
7037  * lpfc_get_starget_port_name - Set the target port name
7038  * @starget: kernel scsi target pointer.
7039  *
7040  * Description:  set the target port name to the ndlp port name wwn or zero.
7041  **/
7042 static void
7043 lpfc_get_starget_port_name(struct scsi_target *starget)
7044 {
7045 	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7046 
7047 	fc_starget_port_name(starget) =
7048 		ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7049 }
7050 
7051 /**
7052  * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
7053  * @rport: fc rport address.
7054  * @timeout: new value for dev loss tmo.
7055  *
7056  * Description:
7057  * If @timeout is non-zero, set the rport's dev_loss_tmo to @timeout;
7058  * otherwise set dev_loss_tmo to one.
7059  **/
7060 static void
7061 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7062 {
7063 	struct lpfc_rport_data *rdata = rport->dd_data;
7064 	struct lpfc_nodelist *ndlp = rdata->pnode;
7065 #if (IS_ENABLED(CONFIG_NVME_FC))
7066 	struct lpfc_nvme_rport *nrport = NULL;
7067 #endif
7068 
7069 	if (timeout)
7070 		rport->dev_loss_tmo = timeout;
7071 	else
7072 		rport->dev_loss_tmo = 1;
7073 
7074 	if (!ndlp) {
7075 		dev_info(&rport->dev, "Cannot find remote node to "
7076 				      "set rport dev loss tmo, port_id x%x\n",
7077 				      rport->port_id);
7078 		return;
7079 	}
7080 
7081 #if (IS_ENABLED(CONFIG_NVME_FC))
7082 	nrport = lpfc_ndlp_get_nrport(ndlp);
7083 
7084 	if (nrport && nrport->remoteport)
7085 		nvme_fc_set_remoteport_devloss(nrport->remoteport,
7086 					       rport->dev_loss_tmo);
7087 #endif
7088 }
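
/*
 * Illustrative note (not part of the original source): this callback runs
 * when the FC transport's per-rport dev_loss_tmo attribute is written, e.g.
 * (the rport name below is an example and is system dependent):
 *
 *   echo 60 > /sys/class/fc_remote_ports/rport-0:0-1/dev_loss_tmo
 */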
7089 
7090 /*
7091  * lpfc_rport_show_function - Return rport target information
7092  *
7093  * Description:
7094  * Macro that uses @field to generate a function named lpfc_show_rport_##field.
7095  *
7096  * lpfc_show_rport_##field: returns the bytes formatted in buf
7097  * @cdev: class converted to an fc_rport.
7098  * @buf: on return contains the target_field or zero.
7099  *
7100  * Returns: size of formatted string.
7101  **/
7102 #define lpfc_rport_show_function(field, format_string, sz, cast)	\
7103 static ssize_t								\
7104 lpfc_show_rport_##field (struct device *dev,				\
7105 			 struct device_attribute *attr,			\
7106 			 char *buf)					\
7107 {									\
7108 	struct fc_rport *rport = transport_class_to_rport(dev);		\
7109 	struct lpfc_rport_data *rdata = rport->hostdata;		\
7110 	return scnprintf(buf, sz, format_string,			\
7111 		(rdata->target) ? cast rdata->target->field : 0);	\
7112 }
7113 
7114 #define lpfc_rport_rd_attr(field, format_string, sz)			\
7115 	lpfc_rport_show_function(field, format_string, sz, )		\
7116 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
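
/*
 * Illustrative expansion (hypothetical field name, not part of the original
 * source): lpfc_rport_rd_attr(example_field, "%d\n", 20) would generate a
 * show routine roughly equivalent to
 *
 *   static ssize_t
 *   lpfc_show_rport_example_field(struct device *dev,
 *                                 struct device_attribute *attr, char *buf)
 *   {
 *           struct fc_rport *rport = transport_class_to_rport(dev);
 *           struct lpfc_rport_data *rdata = rport->hostdata;
 *
 *           return scnprintf(buf, 20, "%d\n",
 *                            rdata->target ? rdata->target->example_field : 0);
 *   }
 *
 * plus a matching read-only FC_RPORT_ATTR declaration.
 */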
7117 
7118 /**
7119  * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
7120  * @fc_vport: The fc_vport whose symbolic name has been changed.
7121  *
7122  * Description:
7123  * This function is called by the transport after the @fc_vport's symbolic name
7124  * has been changed. This function re-registers the symbolic name with the
7125  * switch to propagate the change into the fabric if the vport is active.
7126  **/
7127 static void
7128 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7129 {
7130 	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7131 
7132 	if (vport->port_state == LPFC_VPORT_READY)
7133 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7134 }
7135 
7136 /**
7137  * lpfc_hba_log_verbose_init - Set hba's log verbose level
7138  * @phba: Pointer to lpfc_hba struct.
7139  * @verbose: Verbose level to set.
7140  *
7141  * This function is called by the lpfc_get_cfgparam() routine to copy the
7142  * module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
7143  * which controls log message verbosity before any HBA port or vport has
7144  * been created.
7145  **/
7146 static void
7147 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7148 {
7149 	phba->cfg_log_verbose = verbose;
7150 }
7151 
7152 struct fc_function_template lpfc_transport_functions = {
7153 	/* fixed attributes the driver supports */
7154 	.show_host_node_name = 1,
7155 	.show_host_port_name = 1,
7156 	.show_host_supported_classes = 1,
7157 	.show_host_supported_fc4s = 1,
7158 	.show_host_supported_speeds = 1,
7159 	.show_host_maxframe_size = 1,
7160 
7161 	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
7162 	.show_host_symbolic_name = 1,
7163 
7164 	/* dynamic attributes the driver supports */
7165 	.get_host_port_id = lpfc_get_host_port_id,
7166 	.show_host_port_id = 1,
7167 
7168 	.get_host_port_type = lpfc_get_host_port_type,
7169 	.show_host_port_type = 1,
7170 
7171 	.get_host_port_state = lpfc_get_host_port_state,
7172 	.show_host_port_state = 1,
7173 
7174 	/* active_fc4s is shown but doesn't change (thus no get function) */
7175 	.show_host_active_fc4s = 1,
7176 
7177 	.get_host_speed = lpfc_get_host_speed,
7178 	.show_host_speed = 1,
7179 
7180 	.get_host_fabric_name = lpfc_get_host_fabric_name,
7181 	.show_host_fabric_name = 1,
7182 
7183 	/*
7184 	 * The LPFC driver treats linkdown handling as target loss events
7185 	 * so there are no sysfs handlers for link_down_tmo.
7186 	 */
7187 
7188 	.get_fc_host_stats = lpfc_get_stats,
7189 	.reset_fc_host_stats = lpfc_reset_stats,
7190 
7191 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
7192 	.show_rport_maxframe_size = 1,
7193 	.show_rport_supported_classes = 1,
7194 
7195 	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7196 	.show_rport_dev_loss_tmo = 1,
7197 
7198 	.get_starget_port_id  = lpfc_get_starget_port_id,
7199 	.show_starget_port_id = 1,
7200 
7201 	.get_starget_node_name = lpfc_get_starget_node_name,
7202 	.show_starget_node_name = 1,
7203 
7204 	.get_starget_port_name = lpfc_get_starget_port_name,
7205 	.show_starget_port_name = 1,
7206 
7207 	.issue_fc_host_lip = lpfc_issue_lip,
7208 	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7209 	.terminate_rport_io = lpfc_terminate_rport_io,
7210 
7211 	.dd_fcvport_size = sizeof(struct lpfc_vport *),
7212 
7213 	.vport_disable = lpfc_vport_disable,
7214 
7215 	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7216 
7217 	.bsg_request = lpfc_bsg_request,
7218 	.bsg_timeout = lpfc_bsg_timeout,
7219 };
7220 
7221 struct fc_function_template lpfc_vport_transport_functions = {
7222 	/* fixed attributes the driver supports */
7223 	.show_host_node_name = 1,
7224 	.show_host_port_name = 1,
7225 	.show_host_supported_classes = 1,
7226 	.show_host_supported_fc4s = 1,
7227 	.show_host_supported_speeds = 1,
7228 	.show_host_maxframe_size = 1,
7229 
7230 	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
7231 	.show_host_symbolic_name = 1,
7232 
7233 	/* dynamic attributes the driver supports */
7234 	.get_host_port_id = lpfc_get_host_port_id,
7235 	.show_host_port_id = 1,
7236 
7237 	.get_host_port_type = lpfc_get_host_port_type,
7238 	.show_host_port_type = 1,
7239 
7240 	.get_host_port_state = lpfc_get_host_port_state,
7241 	.show_host_port_state = 1,
7242 
7243 	/* active_fc4s is shown but doesn't change (thus no get function) */
7244 	.show_host_active_fc4s = 1,
7245 
7246 	.get_host_speed = lpfc_get_host_speed,
7247 	.show_host_speed = 1,
7248 
7249 	.get_host_fabric_name = lpfc_get_host_fabric_name,
7250 	.show_host_fabric_name = 1,
7251 
7252 	/*
7253 	 * The LPFC driver treats linkdown handling as target loss events
7254 	 * so there are no sysfs handlers for link_down_tmo.
7255 	 */
7256 
7257 	.get_fc_host_stats = lpfc_get_stats,
7258 	.reset_fc_host_stats = lpfc_reset_stats,
7259 
7260 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
7261 	.show_rport_maxframe_size = 1,
7262 	.show_rport_supported_classes = 1,
7263 
7264 	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7265 	.show_rport_dev_loss_tmo = 1,
7266 
7267 	.get_starget_port_id  = lpfc_get_starget_port_id,
7268 	.show_starget_port_id = 1,
7269 
7270 	.get_starget_node_name = lpfc_get_starget_node_name,
7271 	.show_starget_node_name = 1,
7272 
7273 	.get_starget_port_name = lpfc_get_starget_port_name,
7274 	.show_starget_port_name = 1,
7275 
7276 	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7277 	.terminate_rport_io = lpfc_terminate_rport_io,
7278 
7279 	.vport_disable = lpfc_vport_disable,
7280 
7281 	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7282 };
7283 
7284 /**
7285  * lpfc_get_hba_function_mode - Determine whether the HBA operates in FCoE mode
7287  * @phba: lpfc_hba pointer.
7288  **/
7289 static void
7290 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7291 {
7292 	/* If the adapter supports FCoE mode */
7293 	switch (phba->pcidev->device) {
7294 	case PCI_DEVICE_ID_SKYHAWK:
7295 	case PCI_DEVICE_ID_SKYHAWK_VF:
7296 	case PCI_DEVICE_ID_LANCER_FCOE:
7297 	case PCI_DEVICE_ID_LANCER_FCOE_VF:
7298 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
7299 	case PCI_DEVICE_ID_HORNET:
7300 	case PCI_DEVICE_ID_TIGERSHARK:
7301 	case PCI_DEVICE_ID_TOMCAT:
7302 		phba->hba_flag |= HBA_FCOE_MODE;
7303 		break;
7304 	default:
7305 		/* for others, clear the flag */
7306 		phba->hba_flag &= ~HBA_FCOE_MODE;
7307 	}
7308 }
7309 
7310 /**
7311  * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
7312  * @phba: lpfc_hba pointer.
7313  **/
7314 void
7315 lpfc_get_cfgparam(struct lpfc_hba *phba)
7316 {
7317 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7318 	lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7319 	lpfc_ns_query_init(phba, lpfc_ns_query);
7320 	lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7321 	lpfc_cr_delay_init(phba, lpfc_cr_delay);
7322 	lpfc_cr_count_init(phba, lpfc_cr_count);
7323 	lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7324 	lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7325 	lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7326 	lpfc_ack0_init(phba, lpfc_ack0);
7327 	lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7328 	lpfc_topology_init(phba, lpfc_topology);
7329 	lpfc_link_speed_init(phba, lpfc_link_speed);
7330 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7331 	lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7332 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7333 	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7334 	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7335 	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7336 	lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7337 	lpfc_use_msi_init(phba, lpfc_use_msi);
7338 	lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7339 	lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7340 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7341 	lpfc_force_rscn_init(phba, lpfc_force_rscn);
7342 	lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7343 	lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7344 	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7345 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7346 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7347 
7348 	lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7349 	if (phba->sli_rev != LPFC_SLI_REV4)
7350 		phba->cfg_EnableXLane = 0;
7351 	lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7352 
7353 	memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7354 	memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7355 	phba->cfg_oas_lun_state = 0;
7356 	phba->cfg_oas_lun_status = 0;
7357 	phba->cfg_oas_flags = 0;
7358 	phba->cfg_oas_priority = 0;
7359 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
7360 	lpfc_prot_mask_init(phba, lpfc_prot_mask);
7361 	lpfc_prot_guard_init(phba, lpfc_prot_guard);
7362 	if (phba->sli_rev == LPFC_SLI_REV4)
7363 		phba->cfg_poll = 0;
7364 	else
7365 		phba->cfg_poll = lpfc_poll;
7366 
7367 	/* Get the function mode */
7368 	lpfc_get_hba_function_mode(phba);
7369 
7370 	/* BlockGuard allowed for FC only. */
7371 	if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7372 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7373 				"0581 BlockGuard feature not supported\n");
7374 		/* If set, clear the BlockGuard support param */
7375 		phba->cfg_enable_bg = 0;
7376 	} else if (phba->cfg_enable_bg) {
7377 		phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7378 	}
7379 
7380 	lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7381 
7382 	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7383 	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7384 	lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7385 
7386 	/* Initialize first burst. Target vs Initiator are different. */
7387 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7388 	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7389 	lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7390 	lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7391 	lpfc_irq_chann_init(phba, lpfc_irq_chann);
7392 	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7393 	lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7394 	lpfc_enable_mi_init(phba, lpfc_enable_mi);
7395 
7396 	if (phba->sli_rev != LPFC_SLI_REV4) {
7397 		/* NVME only supported on SLI4 */
7398 		phba->nvmet_support = 0;
7399 		phba->cfg_nvmet_mrq = 0;
7400 		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7401 		phba->cfg_enable_bbcr = 0;
7402 		phba->cfg_xri_rebalancing = 0;
7403 	} else {
7404 		/* We MUST have FCP support */
7405 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7406 			phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7407 	}
7408 
7409 	phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7410 
7411 	phba->cfg_enable_pbde = 0;
7412 
7413 	/* A value of 0 means use the number of CPUs found in the system */
7414 	if (phba->cfg_hdw_queue == 0)
7415 		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7416 	if (phba->cfg_irq_chann == 0)
7417 		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7418 	if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
7419 	    phba->sli_rev == LPFC_SLI_REV4)
7420 		phba->cfg_irq_chann = phba->cfg_hdw_queue;
7421 
7422 	phba->cfg_soft_wwnn = 0L;
7423 	phba->cfg_soft_wwpn = 0L;
7424 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7425 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7426 	lpfc_aer_support_init(phba, lpfc_aer_support);
7427 	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7428 	lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7429 	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7430 	lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7431 	lpfc_sli_mode_init(phba, lpfc_sli_mode);
7432 	lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7433 	lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7434 	lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7435 	lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7436 
7437 	return;
7438 }
7439 
7440 /**
7441  * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
7442  * dependencies between protocols and roles.
7443  * @phba: lpfc_hba pointer.
7444  **/
7445 void
7446 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7447 {
7448 	int  logit = 0;
7449 
7450 	if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7451 		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7452 		logit = 1;
7453 	}
7454 	if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7455 		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7456 		logit = 1;
7457 	}
7458 	if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7459 		phba->cfg_irq_chann = phba->cfg_hdw_queue;
7460 		logit = 1;
7461 	}
7462 	if (logit)
7463 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7464 				"2006 Reducing Queues - CPU limitation: "
7465 				"IRQ %d HDWQ %d\n",
7466 				phba->cfg_irq_chann,
7467 				phba->cfg_hdw_queue);
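
	/*
	 * Worked example (illustrative values, not part of the original
	 * source): on an 8-CPU system with lpfc_hdw_queue=12 and
	 * lpfc_irq_chann=16, both values are first reduced to 8
	 * (num_present_cpu), and the IRQ channel count is then capped at
	 * the hardware queue count, leaving 8 IRQ channels and 8 HDWQs.
	 */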
7468 
7469 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7470 	    phba->nvmet_support) {
7471 		phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7472 
7473 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7474 				"6013 %s x%x fb_size x%x, fb_max x%x\n",
7475 				"NVME Target PRLI ACC enable_fb ",
7476 				phba->cfg_nvme_enable_fb,
7477 				phba->cfg_nvmet_fb_size,
7478 				LPFC_NVMET_FB_SZ_MAX);
7479 
7480 		if (phba->cfg_nvme_enable_fb == 0)
7481 			phba->cfg_nvmet_fb_size = 0;
7482 		else {
7483 			if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7484 				phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7485 		}
7486 
7487 		if (!phba->cfg_nvmet_mrq)
7488 			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7489 
7490 		/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
7491 		if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7492 			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7493 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7494 					"6018 Adjust lpfc_nvmet_mrq to %d\n",
7495 					phba->cfg_nvmet_mrq);
7496 		}
7497 		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7498 			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7499 
7500 	} else {
7501 		/* Not NVME Target mode.  Turn off Target parameters. */
7502 		phba->nvmet_support = 0;
7503 		phba->cfg_nvmet_mrq = 0;
7504 		phba->cfg_nvmet_fb_size = 0;
7505 	}
7506 }
7507 
7508 /**
7509  * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
7510  * @vport: lpfc_vport pointer.
7511  **/
7512 void
7513 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7514 {
7515 	lpfc_log_verbose_init(vport, lpfc_log_verbose);
7516 	lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7517 	lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7518 	lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7519 	lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7520 	lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7521 	lpfc_restrict_login_init(vport, lpfc_restrict_login);
7522 	lpfc_fcp_class_init(vport, lpfc_fcp_class);
7523 	lpfc_use_adisc_init(vport, lpfc_use_adisc);
7524 	lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7525 	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7526 	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7527 	lpfc_max_luns_init(vport, lpfc_max_luns);
7528 	lpfc_scan_down_init(vport, lpfc_scan_down);
7529 	lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7530 	return;
7531 }
7532