1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7
8 #include <linux/bitfield.h>
9 #include <linux/moduleparam.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mutex.h>
14 #include <linux/kobject.h>
15 #include <linux/slab.h>
16 #include <linux/blk-mq-pci.h>
17 #include <linux/refcount.h>
18 #include <linux/crash_dump.h>
19 #include <linux/trace_events.h>
20 #include <linux/trace.h>
21
22 #include <scsi/scsi_tcq.h>
23 #include <scsi/scsicam.h>
24 #include <scsi/scsi_transport.h>
25 #include <scsi/scsi_transport_fc.h>
26
27 #include "qla_target.h"
28
29 /*
30 * Driver version
31 */
32 char qla2x00_version_str[40];
33
34 static int apidev_major;
35
36 /*
37 * SRB allocation cache
38 */
39 struct kmem_cache *srb_cachep;
40
41 static struct trace_array *qla_trc_array;
42
43 int ql2xfulldump_on_mpifail;
44 module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
46 "Set this to take full dump on MPI hang.");
47
48 int ql2xenforce_iocb_limit = 2;
49 module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
50 MODULE_PARM_DESC(ql2xenforce_iocb_limit,
51 "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
52 "1: track usage per queue, 2: track usage per adapter");
53
54 /*
55 * CT6 CTX allocation cache
56 */
57 static struct kmem_cache *ctx_cachep;
58 /*
59 * error level for logging
60 */
61 uint ql_errlev = 0x8001;
62
63 int ql2xsecenable;
64 module_param(ql2xsecenable, int, S_IRUGO);
65 MODULE_PARM_DESC(ql2xsecenable,
66 "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");
67
68 static int ql2xenableclass2;
69 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
70 MODULE_PARM_DESC(ql2xenableclass2,
71 "Specify if Class 2 operations are supported from the very "
72 "beginning. Default is 0 - class 2 not supported.");
73
74
75 int ql2xlogintimeout = 20;
76 module_param(ql2xlogintimeout, int, S_IRUGO);
77 MODULE_PARM_DESC(ql2xlogintimeout,
78 "Login timeout value in seconds.");
79
80 int qlport_down_retry;
81 module_param(qlport_down_retry, int, S_IRUGO);
82 MODULE_PARM_DESC(qlport_down_retry,
83 "Maximum number of command retries to a port that returns "
84 "a PORT-DOWN status.");
85
86 int ql2xplogiabsentdevice;
87 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
88 MODULE_PARM_DESC(ql2xplogiabsentdevice,
89 "Option to enable PLOGI to devices that are not present after "
90 "a Fabric scan. This is needed for several broken switches. "
91 "Default is 0 - no PLOGI. 1 - perform PLOGI.");
92
93 int ql2xloginretrycount;
94 module_param(ql2xloginretrycount, int, S_IRUGO);
95 MODULE_PARM_DESC(ql2xloginretrycount,
96 "Specify an alternate value for the NVRAM login retry count.");
97
98 int ql2xallocfwdump = 1;
99 module_param(ql2xallocfwdump, int, S_IRUGO);
100 MODULE_PARM_DESC(ql2xallocfwdump,
101 "Option to enable allocation of memory for a firmware dump "
102 "during HBA initialization. Memory allocation requirements "
103 "vary by ISP type. Default is 1 - allocate memory.");
104
105 int ql2xextended_error_logging;
106 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
107 module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
108 MODULE_PARM_DESC(ql2xextended_error_logging,
109 "Option to enable extended error logging,\n"
110 "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
111 "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
112 "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
113 "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
114 "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
115 "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
116 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
117 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
118 "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
119 "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
120 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
121 "\t\t0x1e400000 - Preferred value for capturing essential "
122 "debug information (equivalent to old "
123 "ql2xextended_error_logging=1).\n"
124 "\t\tDo LOGICAL OR of the value to enable more than one level");
125
126 int ql2xextended_error_logging_ktrace = 1;
127 module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
128 MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
129 "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
130
131 int ql2xshiftctondsd = 6;
132 module_param(ql2xshiftctondsd, int, S_IRUGO);
133 MODULE_PARM_DESC(ql2xshiftctondsd,
134 "Set to control shifting of command type processing "
135 "based on total number of SG elements.");
136
137 int ql2xfdmienable = 1;
138 module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
139 module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
140 MODULE_PARM_DESC(ql2xfdmienable,
141 "Enables FDMI registrations. "
142 "0 - no FDMI registrations. "
143 "1 - provide FDMI registrations (default).");
144
145 #define MAX_Q_DEPTH 64
146 static int ql2xmaxqdepth = MAX_Q_DEPTH;
147 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
148 MODULE_PARM_DESC(ql2xmaxqdepth,
149 "Maximum queue depth to set for each LUN. "
150 "Default is 64.");
151
152 int ql2xenabledif = 2;
153 module_param(ql2xenabledif, int, S_IRUGO);
154 MODULE_PARM_DESC(ql2xenabledif,
155 " Enable T10-CRC-DIF:\n"
156 " Default is 2.\n"
157 " 0 -- No DIF Support\n"
158 " 1 -- Enable DIF for all types\n"
159 " 2 -- Enable DIF for all types, except Type 0.\n");
160
161 #if (IS_ENABLED(CONFIG_NVME_FC))
162 int ql2xnvmeenable = 1;
163 #else
164 int ql2xnvmeenable;
165 #endif
166 module_param(ql2xnvmeenable, int, 0644);
167 MODULE_PARM_DESC(ql2xnvmeenable,
168 "Enables NVME support. "
169 "0 - no NVMe. Default is Y");
170
171 int ql2xenablehba_err_chk = 2;
172 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
173 MODULE_PARM_DESC(ql2xenablehba_err_chk,
174 " Enable T10-CRC-DIF Error isolation by HBA:\n"
175 " Default is 2.\n"
176 " 0 -- Error isolation disabled\n"
177 " 1 -- Error isolation enabled only for DIX Type 0\n"
178 " 2 -- Error isolation enabled for all Types\n");
179
180 int ql2xiidmaenable = 1;
181 module_param(ql2xiidmaenable, int, S_IRUGO);
182 MODULE_PARM_DESC(ql2xiidmaenable,
183 "Enables iIDMA settings "
184 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
185
186 int ql2xmqsupport = 1;
187 module_param(ql2xmqsupport, int, S_IRUGO);
188 MODULE_PARM_DESC(ql2xmqsupport,
189 "Enable on demand multiple queue pairs support "
190 "Default is 1 for supported. "
191 "Set it to 0 to turn off mq qpair support.");
192
193 int ql2xfwloadbin;
194 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
195 module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
196 MODULE_PARM_DESC(ql2xfwloadbin,
197 "Option to specify location from which to load ISP firmware:.\n"
198 " 2 -- load firmware via the request_firmware() (hotplug).\n"
199 " interface.\n"
200 " 1 -- load firmware from flash.\n"
201 " 0 -- use default semantics.\n");
202
203 int ql2xetsenable;
204 module_param(ql2xetsenable, int, S_IRUGO);
205 MODULE_PARM_DESC(ql2xetsenable,
206 "Enables firmware ETS burst."
207 "Default is 0 - skip ETS enablement.");
208
209 int ql2xdbwr = 1;
210 module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
211 MODULE_PARM_DESC(ql2xdbwr,
212 "Option to specify scheme for request queue posting.\n"
213 " 0 -- Regular doorbell.\n"
214 " 1 -- CAMRAM doorbell (faster).\n");
215
216 int ql2xgffidenable;
217 module_param(ql2xgffidenable, int, S_IRUGO);
218 MODULE_PARM_DESC(ql2xgffidenable,
219 "Enables GFF_ID checks of port type. "
220 "Default is 0 - Do not use GFF_ID information.");
221
222 int ql2xasynctmfenable = 1;
223 module_param(ql2xasynctmfenable, int, S_IRUGO);
224 MODULE_PARM_DESC(ql2xasynctmfenable,
225 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
226 "Default is 1 - Issue TM IOCBs via mailbox mechanism.");
227
228 int ql2xdontresethba;
229 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
230 MODULE_PARM_DESC(ql2xdontresethba,
231 "Option to specify reset behaviour.\n"
232 " 0 (Default) -- Reset on failure.\n"
233 " 1 -- Do not reset on failure.\n");
234
235 uint64_t ql2xmaxlun = MAX_LUNS;
236 module_param(ql2xmaxlun, ullong, S_IRUGO);
237 MODULE_PARM_DESC(ql2xmaxlun,
238 "Defines the maximum LU number to register with the SCSI "
239 "midlayer. Default is 65535.");
240
241 int ql2xmdcapmask = 0x1F;
242 module_param(ql2xmdcapmask, int, S_IRUGO);
243 MODULE_PARM_DESC(ql2xmdcapmask,
244 "Set the Minidump driver capture mask level. "
245 "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
246
247 int ql2xmdenable = 1;
248 module_param(ql2xmdenable, int, S_IRUGO);
249 MODULE_PARM_DESC(ql2xmdenable,
250 "Enable/disable MiniDump. "
251 "0 - MiniDump disabled. "
252 "1 (Default) - MiniDump enabled.");
253
254 int ql2xexlogins;
255 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
256 MODULE_PARM_DESC(ql2xexlogins,
257 "Number of extended Logins. "
258 "0 (Default)- Disabled.");
259
260 int ql2xexchoffld = 1024;
261 module_param(ql2xexchoffld, uint, 0644);
262 MODULE_PARM_DESC(ql2xexchoffld,
263 "Number of target exchanges.");
264
265 int ql2xiniexchg = 1024;
266 module_param(ql2xiniexchg, uint, 0644);
267 MODULE_PARM_DESC(ql2xiniexchg,
268 "Number of initiator exchanges.");
269
270 int ql2xfwholdabts;
271 module_param(ql2xfwholdabts, int, S_IRUGO);
272 MODULE_PARM_DESC(ql2xfwholdabts,
273 "Allow FW to hold status IOCB until ABTS rsp received. "
274 "0 (Default) Do not set fw option. "
275 "1 - Set fw option to hold ABTS.");
276
277 int ql2xmvasynctoatio = 1;
278 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
279 MODULE_PARM_DESC(ql2xmvasynctoatio,
280 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
281 "0 (Default). Do not move IOCBs"
282 "1 - Move IOCBs.");
283
284 int ql2xautodetectsfp = 1;
285 module_param(ql2xautodetectsfp, int, 0444);
286 MODULE_PARM_DESC(ql2xautodetectsfp,
287 "Detect SFP range and set appropriate distance.\n"
288 "1 (Default): Enable\n");
289
290 int ql2xenablemsix = 1;
291 module_param(ql2xenablemsix, int, 0444);
292 MODULE_PARM_DESC(ql2xenablemsix,
293 "Set to enable MSI or MSI-X interrupt mechanism.\n"
294 " Default is 1, enable MSI-X interrupt mechanism.\n"
295 " 0 -- enable traditional pin-based mechanism.\n"
296 " 1 -- enable MSI-X interrupt mechanism.\n"
297 " 2 -- enable MSI interrupt mechanism.\n");
298
299 int qla2xuseresexchforels;
300 module_param(qla2xuseresexchforels, int, 0444);
301 MODULE_PARM_DESC(qla2xuseresexchforels,
302 "Reserve 1/2 of emergency exchanges for ELS.\n"
303 " 0 (default): disabled");
304
305 static int ql2xprotmask;
306 module_param(ql2xprotmask, int, 0644);
307 MODULE_PARM_DESC(ql2xprotmask,
308 "Override DIF/DIX protection capabilities mask\n"
309 "Default is 0 which sets protection mask based on "
310 "capabilities reported by HBA firmware.\n");
311
312 static int ql2xprotguard;
313 module_param(ql2xprotguard, int, 0644);
314 MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
315 " 0 -- Let HBA firmware decide\n"
316 " 1 -- Force T10 CRC\n"
317 " 2 -- Force IP checksum\n");
318
319 int ql2xdifbundlinginternalbuffers;
320 module_param(ql2xdifbundlinginternalbuffers, int, 0644);
321 MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
322 "Force using internal buffers for DIF information\n"
323 "0 (Default). Based on check.\n"
324 "1 Force using internal buffers\n");
325
326 int ql2xsmartsan;
327 module_param(ql2xsmartsan, int, 0444);
328 module_param_named(smartsan, ql2xsmartsan, int, 0444);
329 MODULE_PARM_DESC(ql2xsmartsan,
330 "Send SmartSAN Management Attributes for FDMI Registration."
331 " Default is 0 - No SmartSAN registration,"
332 " 1 - Register SmartSAN Management Attributes.");
333
334 int ql2xrdpenable;
335 module_param(ql2xrdpenable, int, 0444);
336 module_param_named(rdpenable, ql2xrdpenable, int, 0444);
337 MODULE_PARM_DESC(ql2xrdpenable,
338 "Enables RDP responses. "
339 "0 - no RDP responses (default). "
340 "1 - provide RDP responses.");
341 int ql2xabts_wait_nvme = 1;
342 module_param(ql2xabts_wait_nvme, int, 0444);
343 MODULE_PARM_DESC(ql2xabts_wait_nvme,
344 "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
345
346
347 static u32 ql2xdelay_before_pci_error_handling = 5;
348 module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
349 MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
350 "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
351
352 static void qla2x00_clear_drv_active(struct qla_hw_data *);
353 static void qla2x00_free_device(scsi_qla_host_t *);
354 static void qla2xxx_map_queues(struct Scsi_Host *shost);
355 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
356
357 u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
358 module_param(ql2xnvme_queues, uint, S_IRUGO);
359 MODULE_PARM_DESC(ql2xnvme_queues,
360 "Number of NVMe Queues that can be configured.\n"
361 "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
362 "1 - Minimum number of queues supported\n"
363 "8 - Default value");
364
365 int ql2xfc2target = 1;
366 module_param(ql2xfc2target, int, 0444);
367 MODULE_PARM_DESC(ql2xfc2target,
368 "Enables FC2 Target support. "
369 "0 - FC2 Target support is disabled. "
370 "1 - FC2 Target support is enabled (default).");
371
372 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
373 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
374
375 /* TODO Convert to inlines
376 *
377 * Timer routines
378 */
379
380 __inline__ void
381 qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
382 {
383 timer_setup(&vha->timer, qla2x00_timer, 0);
384 vha->timer.expires = jiffies + interval * HZ;
385 add_timer(&vha->timer);
386 vha->timer_active = 1;
387 }
388
389 static inline void
390 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
391 {
392 /* Currently used for 82XX only. */
393 if (vha->device_flags & DFLG_DEV_FAILED) {
394 ql_dbg(ql_dbg_timer, vha, 0x600d,
395 "Device in a failed state, returning.\n");
396 return;
397 }
398
399 mod_timer(&vha->timer, jiffies + interval * HZ);
400 }
401
402 static __inline__ void
403 qla2x00_stop_timer(scsi_qla_host_t *vha)
404 {
405 del_timer_sync(&vha->timer);
406 vha->timer_active = 0;
407 }
408
409 static int qla2x00_do_dpc(void *data);
410
411 static void qla2x00_rst_aen(scsi_qla_host_t *);
412
413 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
414 struct req_que **, struct rsp_que **);
415 static void qla2x00_free_fw_dump(struct qla_hw_data *);
416 static void qla2x00_mem_free(struct qla_hw_data *);
417 int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
418 struct qla_qpair *qpair);
419
420 /* -------------------------------------------------------------------------- */
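/*
 * Initialize the base queue pair: wire up the request/response queues,
 * hardware lock, MSI-X vector and SRB mempool, and select the
 * 83xx/27xx/28xx IOCB start routine when applicable.
 */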
421 static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
422 struct rsp_que *rsp)
423 {
424 struct qla_hw_data *ha = vha->hw;
425
426 rsp->qpair = ha->base_qpair;
427 rsp->req = req;
428 ha->base_qpair->hw = ha;
429 ha->base_qpair->req = req;
430 ha->base_qpair->rsp = rsp;
431 ha->base_qpair->vha = vha;
432 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
433 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
434 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
435 ha->base_qpair->srb_mempool = ha->srb_mempool;
436 INIT_LIST_HEAD(&ha->base_qpair->hints_list);
437 INIT_LIST_HEAD(&ha->base_qpair->dsd_list);
438 ha->base_qpair->enable_class_2 = ql2xenableclass2;
439 /* init qpair to this cpu. Will adjust at run time. */
440 qla_cpu_update(rsp->qpair, raw_smp_processor_id());
441 ha->base_qpair->pdev = ha->pdev;
442
443 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
444 ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
445 }
446
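/*
 * Allocate the request/response queue pointer arrays, the base queue
 * pair and, for MQ/NVMe operation, the queue_pair_map.  Returns 0 on
 * success or -ENOMEM after unwinding any partial allocations.
 */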
447 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
448 struct rsp_que *rsp)
449 {
450 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
451
452 ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
453 GFP_KERNEL);
454 if (!ha->req_q_map) {
455 ql_log(ql_log_fatal, vha, 0x003b,
456 "Unable to allocate memory for request queue ptrs.\n");
457 goto fail_req_map;
458 }
459
460 ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
461 GFP_KERNEL);
462 if (!ha->rsp_q_map) {
463 ql_log(ql_log_fatal, vha, 0x003c,
464 "Unable to allocate memory for response queue ptrs.\n");
465 goto fail_rsp_map;
466 }
467
468 ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
469 if (ha->base_qpair == NULL) {
470 ql_log(ql_log_warn, vha, 0x00e0,
471 "Failed to allocate base queue pair memory.\n");
472 goto fail_base_qpair;
473 }
474
475 qla_init_base_qpair(vha, req, rsp);
476
477 if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
478 ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
479 GFP_KERNEL);
480 if (!ha->queue_pair_map) {
481 ql_log(ql_log_fatal, vha, 0x0180,
482 "Unable to allocate memory for queue pair ptrs.\n");
483 goto fail_qpair_map;
484 }
485 if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
486 kfree(ha->queue_pair_map);
487 ha->queue_pair_map = NULL;
488 goto fail_qpair_map;
489 }
490 }
491
492 /*
493 * Make sure we record at least the request and response queue zero in
494 * case we need to free them if part of the probe fails.
495 */
496 ha->rsp_q_map[0] = rsp;
497 ha->req_q_map[0] = req;
498 set_bit(0, ha->rsp_qid_map);
499 set_bit(0, ha->req_qid_map);
500 return 0;
501
502 fail_qpair_map:
503 kfree(ha->base_qpair);
504 ha->base_qpair = NULL;
505 fail_base_qpair:
506 kfree(ha->rsp_q_map);
507 ha->rsp_q_map = NULL;
508 fail_rsp_map:
509 kfree(ha->req_q_map);
510 ha->req_q_map = NULL;
511 fail_req_map:
512 return -ENOMEM;
513 }
514
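/* Release a request queue's ring memory, its outstanding command array and the queue itself. */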
515 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
516 {
517 if (IS_QLAFX00(ha)) {
518 if (req && req->ring_fx00)
519 dma_free_coherent(&ha->pdev->dev,
520 (req->length_fx00 + 1) * sizeof(request_t),
521 req->ring_fx00, req->dma_fx00);
522 } else if (req && req->ring)
523 dma_free_coherent(&ha->pdev->dev,
524 (req->length + 1) * sizeof(request_t),
525 req->ring, req->dma);
526
527 if (req)
528 kfree(req->outstanding_cmds);
529
530 kfree(req);
531 }
532
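/* Release a response queue's ring memory and the queue itself. */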
533 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
534 {
535 if (IS_QLAFX00(ha)) {
536 if (rsp && rsp->ring_fx00)
537 dma_free_coherent(&ha->pdev->dev,
538 (rsp->length_fx00 + 1) * sizeof(request_t),
539 rsp->ring_fx00, rsp->dma_fx00);
540 } else if (rsp && rsp->ring) {
541 dma_free_coherent(&ha->pdev->dev,
542 (rsp->length + 1) * sizeof(response_t),
543 rsp->ring, rsp->dma);
544 }
545 kfree(rsp);
546 }
547
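/*
 * Tear down every request/response queue recorded in the qid maps along
 * with the base queue pair and queue pair map.  The hardware lock is
 * dropped around each queue free since it may sleep.
 */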
548 static void qla2x00_free_queues(struct qla_hw_data *ha)
549 {
550 struct req_que *req;
551 struct rsp_que *rsp;
552 int cnt;
553 unsigned long flags;
554
555 if (ha->queue_pair_map) {
556 kfree(ha->queue_pair_map);
557 ha->queue_pair_map = NULL;
558 }
559 if (ha->base_qpair) {
560 kfree(ha->base_qpair);
561 ha->base_qpair = NULL;
562 }
563
564 qla_mapq_free_qp_cpu_map(ha);
565 spin_lock_irqsave(&ha->hardware_lock, flags);
566 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
567 if (!test_bit(cnt, ha->req_qid_map))
568 continue;
569
570 req = ha->req_q_map[cnt];
571 clear_bit(cnt, ha->req_qid_map);
572 ha->req_q_map[cnt] = NULL;
573
574 spin_unlock_irqrestore(&ha->hardware_lock, flags);
575 qla2x00_free_req_que(ha, req);
576 spin_lock_irqsave(&ha->hardware_lock, flags);
577 }
578 spin_unlock_irqrestore(&ha->hardware_lock, flags);
579
580 kfree(ha->req_q_map);
581 ha->req_q_map = NULL;
582
583
584 spin_lock_irqsave(&ha->hardware_lock, flags);
585 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
586 if (!test_bit(cnt, ha->rsp_qid_map))
587 continue;
588
589 rsp = ha->rsp_q_map[cnt];
590 clear_bit(cnt, ha->rsp_qid_map);
591 ha->rsp_q_map[cnt] = NULL;
592 spin_unlock_irqrestore(&ha->hardware_lock, flags);
593 qla2x00_free_rsp_que(ha, rsp);
594 spin_lock_irqsave(&ha->hardware_lock, flags);
595 }
596 spin_unlock_irqrestore(&ha->hardware_lock, flags);
597
598 kfree(ha->rsp_q_map);
599 ha->rsp_q_map = NULL;
600 }
601
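/* Format a human-readable PCI/PCI-X bus mode string from the saved pci_attr bits. */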
602 static char *
603 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
604 {
605 struct qla_hw_data *ha = vha->hw;
606 static const char *const pci_bus_modes[] = {
607 "33", "66", "100", "133",
608 };
609 uint16_t pci_bus;
610
611 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
612 if (pci_bus) {
613 snprintf(str, str_len, "PCI-X (%s MHz)",
614 pci_bus_modes[pci_bus]);
615 } else {
616 pci_bus = (ha->pci_attr & BIT_8) >> 8;
617 snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
618 }
619
620 return str;
621 }
622
623 static char *
624 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
625 {
626 static const char *const pci_bus_modes[] = {
627 "33", "66", "100", "133",
628 };
629 struct qla_hw_data *ha = vha->hw;
630 uint32_t pci_bus;
631
632 if (pci_is_pcie(ha->pdev)) {
633 uint32_t lstat, lspeed, lwidth;
634 const char *speed_str;
635
636 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
637 lspeed = FIELD_GET(PCI_EXP_LNKCAP_SLS, lstat);
638 lwidth = FIELD_GET(PCI_EXP_LNKCAP_MLW, lstat);
639
640 switch (lspeed) {
641 case 1:
642 speed_str = "2.5GT/s";
643 break;
644 case 2:
645 speed_str = "5.0GT/s";
646 break;
647 case 3:
648 speed_str = "8.0GT/s";
649 break;
650 case 4:
651 speed_str = "16.0GT/s";
652 break;
653 default:
654 speed_str = "<unknown>";
655 break;
656 }
657 snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
658
659 return str;
660 }
661
662 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
663 if (pci_bus == 0 || pci_bus == 8)
664 snprintf(str, str_len, "PCI (%s MHz)",
665 pci_bus_modes[pci_bus >> 3]);
666 else
667 snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
668 pci_bus & 4 ? 2 : 1,
669 pci_bus_modes[pci_bus & 3]);
670
671 return str;
672 }
673
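/* Build the firmware version string, appending a suffix derived from the firmware attributes. */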
674 static char *
675 qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
676 {
677 char un_str[10];
678 struct qla_hw_data *ha = vha->hw;
679
680 snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
681 ha->fw_minor_version, ha->fw_subminor_version);
682
683 if (ha->fw_attributes & BIT_9) {
684 strcat(str, "FLX");
685 return (str);
686 }
687
688 switch (ha->fw_attributes & 0xFF) {
689 case 0x7:
690 strcat(str, "EF");
691 break;
692 case 0x17:
693 strcat(str, "TP");
694 break;
695 case 0x37:
696 strcat(str, "IP");
697 break;
698 case 0x77:
699 strcat(str, "VI");
700 break;
701 default:
702 sprintf(un_str, "(%x)", ha->fw_attributes);
703 strcat(str, un_str);
704 break;
705 }
706 if (ha->fw_attributes & 0x100)
707 strcat(str, "X");
708
709 return (str);
710 }
711
712 static char *
713 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
714 {
715 struct qla_hw_data *ha = vha->hw;
716
717 snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
718 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
719 return str;
720 }
721
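/*
 * Undo the DMA mappings and context allocations made while building a
 * SCSI command SRB (data/protection SG lists, CRC contexts, FCP_CMND
 * buffer and DSD lists).
 */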
722 void qla2x00_sp_free_dma(srb_t *sp)
723 {
724 struct qla_hw_data *ha = sp->vha->hw;
725 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
726
727 if (sp->flags & SRB_DMA_VALID) {
728 scsi_dma_unmap(cmd);
729 sp->flags &= ~SRB_DMA_VALID;
730 }
731
732 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
733 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
734 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
735 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
736 }
737
738 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
739 		/* The list is assured to have elements. */
740 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
741 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
742 }
743
744 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
745 struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
746
747 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
748 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
749 }
750
751 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
752 struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
753
754 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
755 ctx1->fcp_cmnd_dma);
756 list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
757 sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
758 sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
759 }
760
761 if (sp->flags & SRB_GOT_BUF)
762 qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
763 }
764
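/* Completion callback for base-queue SCSI SRBs: drop the initial kref and finish the command. */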
765 void qla2x00_sp_compl(srb_t *sp, int res)
766 {
767 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
768 struct completion *comp = sp->comp;
769
770 /* kref: INIT */
771 kref_put(&sp->cmd_kref, qla2x00_sp_release);
772 cmd->result = res;
773 sp->type = 0;
774 scsi_done(cmd);
775 if (comp)
776 complete(comp);
777 }
778
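/*
 * Queue-pair variant of qla2x00_sp_free_dma(); additionally releases the
 * DIF bundling DSD lists when SRB_DIF_BUNDL_DMA_VALID is set.
 */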
779 void qla2xxx_qpair_sp_free_dma(srb_t *sp)
780 {
781 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
782 struct qla_hw_data *ha = sp->fcport->vha->hw;
783
784 if (sp->flags & SRB_DMA_VALID) {
785 scsi_dma_unmap(cmd);
786 sp->flags &= ~SRB_DMA_VALID;
787 }
788
789 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
790 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
791 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
792 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
793 }
794
795 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
796 		/* The list is assured to have elements. */
797 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
798 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
799 }
800
801 if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
802 struct crc_context *difctx = sp->u.scmd.crc_ctx;
803 struct dsd_dma *dif_dsd, *nxt_dsd;
804
805 list_for_each_entry_safe(dif_dsd, nxt_dsd,
806 &difctx->ldif_dma_hndl_list, list) {
807 list_del(&dif_dsd->list);
808 dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
809 dif_dsd->dsd_list_dma);
810 kfree(dif_dsd);
811 difctx->no_dif_bundl--;
812 }
813
814 list_for_each_entry_safe(dif_dsd, nxt_dsd,
815 &difctx->ldif_dsd_list, list) {
816 list_del(&dif_dsd->list);
817 dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
818 dif_dsd->dsd_list_dma);
819 kfree(dif_dsd);
820 difctx->no_ldif_dsd--;
821 }
822
823 if (difctx->no_ldif_dsd) {
824 ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
825 "%s: difctx->no_ldif_dsd=%x\n",
826 __func__, difctx->no_ldif_dsd);
827 }
828
829 if (difctx->no_dif_bundl) {
830 ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
831 "%s: difctx->no_dif_bundl=%x\n",
832 __func__, difctx->no_dif_bundl);
833 }
834 sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
835 }
836
837 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
838 struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
839
840 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
841 ctx1->fcp_cmnd_dma);
842 list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
843 sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
844 sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
845 sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
846 }
847
848 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
849 struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
850
851 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
852 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
853 }
854
855 if (sp->flags & SRB_GOT_BUF)
856 qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
857 }
858
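/* Completion callback for queue-pair SCSI SRBs; mirrors qla2x00_sp_compl(). */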
859 void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
860 {
861 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
862 struct completion *comp = sp->comp;
863
864 /* ref: INIT */
865 kref_put(&sp->cmd_kref, qla2x00_sp_release);
866 cmd->result = res;
867 sp->type = 0;
868 scsi_done(cmd);
869 if (comp)
870 complete(comp);
871 }
872
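/*
 * Main .queuecommand entry point.  Validates rport/fcport state, routes
 * the command to a blk-mq queue pair when MQ is enabled, then builds the
 * SRB and hands it to the ISP-specific start_scsi routine.
 */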
873 static int
874 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
875 {
876 scsi_qla_host_t *vha = shost_priv(host);
877 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
878 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
879 struct qla_hw_data *ha = vha->hw;
880 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
881 srb_t *sp;
882 int rval;
883
884 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
885 WARN_ON_ONCE(!rport)) {
886 cmd->result = DID_NO_CONNECT << 16;
887 goto qc24_fail_command;
888 }
889
890 if (ha->mqenable) {
891 uint32_t tag;
892 uint16_t hwq;
893 struct qla_qpair *qpair = NULL;
894
895 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
896 hwq = blk_mq_unique_tag_to_hwq(tag);
897 qpair = ha->queue_pair_map[hwq];
898
899 if (qpair)
900 return qla2xxx_mqueuecommand(host, cmd, qpair);
901 }
902
903 if (ha->flags.eeh_busy) {
904 if (ha->flags.pci_channel_io_perm_failure) {
905 ql_dbg(ql_dbg_aer, vha, 0x9010,
906 "PCI Channel IO permanent failure, exiting "
907 "cmd=%p.\n", cmd);
908 cmd->result = DID_NO_CONNECT << 16;
909 } else {
910 ql_dbg(ql_dbg_aer, vha, 0x9011,
911 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
912 cmd->result = DID_REQUEUE << 16;
913 }
914 goto qc24_fail_command;
915 }
916
917 rval = fc_remote_port_chkready(rport);
918 if (rval) {
919 cmd->result = rval;
920 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
921 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
922 cmd, rval);
923 goto qc24_fail_command;
924 }
925
926 if (!vha->flags.difdix_supported &&
927 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
928 ql_dbg(ql_dbg_io, vha, 0x3004,
929 "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
930 cmd);
931 cmd->result = DID_NO_CONNECT << 16;
932 goto qc24_fail_command;
933 }
934
935 if (!fcport || fcport->deleted) {
936 cmd->result = DID_IMM_RETRY << 16;
937 goto qc24_fail_command;
938 }
939
940 if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
941 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
942 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
943 ql_dbg(ql_dbg_io, vha, 0x3005,
944 "Returning DNC, fcport_state=%d loop_state=%d.\n",
945 atomic_read(&fcport->state),
946 atomic_read(&base_vha->loop_state));
947 cmd->result = DID_NO_CONNECT << 16;
948 goto qc24_fail_command;
949 }
950 goto qc24_target_busy;
951 }
952
953 /*
954 * Return target busy if we've received a non-zero retry_delay_timer
955 * in a FCP_RSP.
956 */
957 if (fcport->retry_delay_timestamp == 0) {
958 /* retry delay not set */
959 } else if (time_after(jiffies, fcport->retry_delay_timestamp))
960 fcport->retry_delay_timestamp = 0;
961 else
962 goto qc24_target_busy;
963
964 sp = scsi_cmd_priv(cmd);
965 /* ref: INIT */
966 qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
967
968 sp->u.scmd.cmd = cmd;
969 sp->type = SRB_SCSI_CMD;
970 sp->free = qla2x00_sp_free_dma;
971 sp->done = qla2x00_sp_compl;
972
973 rval = ha->isp_ops->start_scsi(sp);
974 if (rval != QLA_SUCCESS) {
975 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
976 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
977 goto qc24_host_busy_free_sp;
978 }
979
980 return 0;
981
982 qc24_host_busy_free_sp:
983 /* ref: INIT */
984 kref_put(&sp->cmd_kref, qla2x00_sp_release);
985
986 qc24_target_busy:
987 return SCSI_MLQUEUE_TARGET_BUSY;
988
989 qc24_fail_command:
990 scsi_done(cmd);
991
992 return 0;
993 }
994
995 /* For MQ supported I/O */
996 int
997 qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
998 struct qla_qpair *qpair)
999 {
1000 scsi_qla_host_t *vha = shost_priv(host);
1001 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1002 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
1003 struct qla_hw_data *ha = vha->hw;
1004 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1005 srb_t *sp;
1006 int rval;
1007
1008 rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
1009 if (rval) {
1010 cmd->result = rval;
1011 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
1012 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
1013 cmd, rval);
1014 goto qc24_fail_command;
1015 }
1016
1017 if (!qpair->online) {
1018 ql_dbg(ql_dbg_io, vha, 0x3077,
1019 "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
1020 cmd->result = DID_NO_CONNECT << 16;
1021 goto qc24_fail_command;
1022 }
1023
1024 if (!fcport || fcport->deleted) {
1025 cmd->result = DID_IMM_RETRY << 16;
1026 goto qc24_fail_command;
1027 }
1028
1029 if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
1030 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
1031 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1032 ql_dbg(ql_dbg_io, vha, 0x3077,
1033 "Returning DNC, fcport_state=%d loop_state=%d.\n",
1034 atomic_read(&fcport->state),
1035 atomic_read(&base_vha->loop_state));
1036 cmd->result = DID_NO_CONNECT << 16;
1037 goto qc24_fail_command;
1038 }
1039 goto qc24_target_busy;
1040 }
1041
1042 /*
1043 * Return target busy if we've received a non-zero retry_delay_timer
1044 * in a FCP_RSP.
1045 */
1046 if (fcport->retry_delay_timestamp == 0) {
1047 /* retry delay not set */
1048 } else if (time_after(jiffies, fcport->retry_delay_timestamp))
1049 fcport->retry_delay_timestamp = 0;
1050 else
1051 goto qc24_target_busy;
1052
1053 sp = scsi_cmd_priv(cmd);
1054 /* ref: INIT */
1055 qla2xxx_init_sp(sp, vha, qpair, fcport);
1056
1057 sp->u.scmd.cmd = cmd;
1058 sp->type = SRB_SCSI_CMD;
1059 sp->free = qla2xxx_qpair_sp_free_dma;
1060 sp->done = qla2xxx_qpair_sp_compl;
1061
1062 rval = ha->isp_ops->start_scsi_mq(sp);
1063 if (rval != QLA_SUCCESS) {
1064 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
1065 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
1066 goto qc24_host_busy_free_sp;
1067 }
1068
1069 return 0;
1070
1071 qc24_host_busy_free_sp:
1072 /* ref: INIT */
1073 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1074
1075 qc24_target_busy:
1076 return SCSI_MLQUEUE_TARGET_BUSY;
1077
1078 qc24_fail_command:
1079 scsi_done(cmd);
1080
1081 return 0;
1082 }
1083
1084 /*
1085 * qla2x00_wait_for_hba_online
1086  *    Wait until the HBA is online after going through
1087  *    <= MAX_RETRIES_OF_ISP_ABORT iterations, or until the
1088  *    HBA is finally disabled, i.e. marked offline.
1089 *
1090 * Input:
1091 * ha - pointer to host adapter structure
1092 *
1093 * Note:
1094  *    Does context switching. Release any SPIN_LOCK
1095  *    held before calling this routine.
1096 *
1097 * Return:
1098 * Success (Adapter is online) : 0
1099 * Failed (Adapter is offline/disabled) : 1
1100 */
1101 int
1102 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
1103 {
1104 int return_status;
1105 unsigned long wait_online;
1106 struct qla_hw_data *ha = vha->hw;
1107 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1108
1109 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1110 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1111 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1112 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1113 ha->dpc_active) && time_before(jiffies, wait_online)) {
1114
1115 msleep(1000);
1116 }
1117 if (base_vha->flags.online)
1118 return_status = QLA_SUCCESS;
1119 else
1120 return_status = QLA_FUNCTION_FAILED;
1121
1122 return (return_status);
1123 }
1124
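/*
 * Check whether all sessions are gone: returns nonzero (wake) only when
 * fcport_count is zero and every remaining fcport has completed session
 * deletion.
 */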
1125 static inline int test_fcport_count(scsi_qla_host_t *vha)
1126 {
1127 struct qla_hw_data *ha = vha->hw;
1128 unsigned long flags;
1129 int res;
1130 /* Return 0 = sleep, x=wake */
1131
1132 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1133 ql_dbg(ql_dbg_init, vha, 0x00ec,
1134 "tgt %p, fcport_count=%d\n",
1135 vha, vha->fcport_count);
1136 res = (vha->fcport_count == 0);
1137 if (res) {
1138 struct fc_port *fcport;
1139
1140 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1141 if (fcport->deleted != QLA_SESS_DELETED) {
1142 /* session(s) may not be fully logged in
1143 * (ie fcport_count=0), but session
1144 * deletion thread(s) may be inflight.
1145 */
1146
1147 res = 0;
1148 break;
1149 }
1150 }
1151 }
1152 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1153
1154 return res;
1155 }
1156
1157 /*
1158 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
1159 * it has dependency on UNLOADING flag to stop device discovery
1160 */
1161 void
1162 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
1163 {
1164 u8 i;
1165
1166 qla2x00_mark_all_devices_lost(vha);
1167
1168 for (i = 0; i < 10; i++) {
1169 if (wait_event_timeout(vha->fcport_waitQ,
1170 test_fcport_count(vha), HZ) > 0)
1171 break;
1172 }
1173
1174 flush_workqueue(vha->hw->wq);
1175 }
1176
1177 /*
1178 * qla2x00_wait_for_hba_ready
1179 * Wait till the HBA is ready before doing driver unload
1180 *
1181 * Input:
1182 * ha - pointer to host adapter structure
1183 *
1184 * Note:
1185  *    Does context switching. Release any SPIN_LOCK
1186  *    held before calling this routine.
1187 *
1188 */
1189 static void
1190 qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
1191 {
1192 struct qla_hw_data *ha = vha->hw;
1193 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1194
1195 while ((qla2x00_reset_active(vha) || ha->dpc_active ||
1196 ha->flags.mbox_busy) ||
1197 test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
1198 test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
1199 if (test_bit(UNLOADING, &base_vha->dpc_flags))
1200 break;
1201 msleep(1000);
1202 }
1203 }
1204
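/*
 * Wait up to MAX_LOOP_TIMEOUT seconds for an in-progress ISP abort to
 * finish; returns QLA_SUCCESS once chip_reset_done is set.
 */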
1205 int
1206 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
1207 {
1208 int return_status;
1209 unsigned long wait_reset;
1210 struct qla_hw_data *ha = vha->hw;
1211 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1212
1213 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1214 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1215 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1216 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1217 ha->dpc_active) && time_before(jiffies, wait_reset)) {
1218
1219 msleep(1000);
1220
1221 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1222 ha->flags.chip_reset_done)
1223 break;
1224 }
1225 if (ha->flags.chip_reset_done)
1226 return_status = QLA_SUCCESS;
1227 else
1228 return_status = QLA_FUNCTION_FAILED;
1229
1230 return return_status;
1231 }
1232
1233 /**************************************************************************
1234 * qla2xxx_eh_abort
1235 *
1236 * Description:
1237 * The abort function will abort the specified command.
1238 *
1239 * Input:
1240 * cmd = Linux SCSI command packet to be aborted.
1241 *
1242 * Returns:
1243 * Either SUCCESS or FAILED.
1244 *
1245 * Note:
1246 * Only return FAILED if command not returned by firmware.
1247 **************************************************************************/
1248 static int
1249 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1250 {
1251 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1252 DECLARE_COMPLETION_ONSTACK(comp);
1253 srb_t *sp;
1254 int ret;
1255 unsigned int id;
1256 uint64_t lun;
1257 int rval;
1258 struct qla_hw_data *ha = vha->hw;
1259 uint32_t ratov_j;
1260 struct qla_qpair *qpair;
1261 unsigned long flags;
1262 int fast_fail_status = SUCCESS;
1263
1264 if (qla2x00_isp_reg_stat(ha)) {
1265 ql_log(ql_log_info, vha, 0x8042,
1266 "PCI/Register disconnect, exiting.\n");
1267 qla_pci_set_eeh_busy(vha);
1268 return FAILED;
1269 }
1270
1271 /* Save any FAST_IO_FAIL value to return later if abort succeeds */
1272 ret = fc_block_scsi_eh(cmd);
1273 if (ret != 0)
1274 fast_fail_status = ret;
1275
1276 sp = scsi_cmd_priv(cmd);
1277 qpair = sp->qpair;
1278
1279 vha->cmd_timeout_cnt++;
1280
1281 if ((sp->fcport && sp->fcport->deleted) || !qpair)
1282 return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
1283
1284 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1285 sp->comp = ∁
1286 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1287
1288
1289 id = cmd->device->id;
1290 lun = cmd->device->lun;
1291
1292 ql_dbg(ql_dbg_taskm, vha, 0x8002,
1293 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
1294 vha->host_no, id, lun, sp, cmd, sp->handle);
1295
1296 /*
1297 * Abort will release the original Command/sp from FW. Let the
1298 	 * original command call scsi_done. In return, it will wake up
1299 * this sleeping thread.
1300 */
1301 rval = ha->isp_ops->abort_command(sp);
1302
1303 ql_dbg(ql_dbg_taskm, vha, 0x8003,
1304 "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
1305
1306 /* Wait for the command completion. */
1307 ratov_j = ha->r_a_tov/10 * 4 * 1000;
1308 ratov_j = msecs_to_jiffies(ratov_j);
1309 switch (rval) {
1310 case QLA_SUCCESS:
1311 if (!wait_for_completion_timeout(&comp, ratov_j)) {
1312 ql_dbg(ql_dbg_taskm, vha, 0xffff,
1313 "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1314 __func__, ha->r_a_tov/10);
1315 ret = FAILED;
1316 } else {
1317 ret = fast_fail_status;
1318 }
1319 break;
1320 default:
1321 ret = FAILED;
1322 break;
1323 }
1324
1325 sp->comp = NULL;
1326
1327 ql_log(ql_log_info, vha, 0x801c,
1328 "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
1329 vha->host_no, id, lun, ret);
1330
1331 return ret;
1332 }
1333
1334 #define ABORT_POLLING_PERIOD 1000
1335 #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
1336
1337 /*
1338 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
1339 */
1340 static int
1341 __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
1342 uint64_t l, enum nexus_wait_type type)
1343 {
1344 int cnt, match, status;
1345 unsigned long flags;
1346 scsi_qla_host_t *vha = qpair->vha;
1347 struct req_que *req = qpair->req;
1348 srb_t *sp;
1349 struct scsi_cmnd *cmd;
1350 unsigned long wait_iter = ABORT_WAIT_ITER;
1351 bool found;
1352 struct qla_hw_data *ha = vha->hw;
1353
1354 status = QLA_SUCCESS;
1355
1356 while (wait_iter--) {
1357 found = false;
1358
1359 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1360 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1361 sp = req->outstanding_cmds[cnt];
1362 if (!sp)
1363 continue;
1364 if (sp->type != SRB_SCSI_CMD)
1365 continue;
1366 if (vha->vp_idx != sp->vha->vp_idx)
1367 continue;
1368 match = 0;
1369 cmd = GET_CMD_SP(sp);
1370 switch (type) {
1371 case WAIT_HOST:
1372 match = 1;
1373 break;
1374 case WAIT_TARGET:
1375 if (sp->fcport)
1376 match = sp->fcport->d_id.b24 == t;
1377 else
1378 match = 0;
1379 break;
1380 case WAIT_LUN:
1381 if (sp->fcport)
1382 match = (sp->fcport->d_id.b24 == t &&
1383 cmd->device->lun == l);
1384 else
1385 match = 0;
1386 break;
1387 }
1388 if (!match)
1389 continue;
1390
1391 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1392
1393 if (unlikely(pci_channel_offline(ha->pdev)) ||
1394 ha->flags.eeh_busy) {
1395 ql_dbg(ql_dbg_taskm, vha, 0x8005,
1396 "Return:eh_wait.\n");
1397 return status;
1398 }
1399
1400 /*
1401 * SRB_SCSI_CMD is still in the outstanding_cmds array.
1402 			 * It means scsi_done has not been called yet. Wait for it to
1403 * clear from outstanding_cmds.
1404 */
1405 msleep(ABORT_POLLING_PERIOD);
1406 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1407 found = true;
1408 }
1409 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1410
1411 if (!found)
1412 break;
1413 }
1414
1415 if (wait_iter == -1)
1416 status = QLA_FUNCTION_FAILED;
1417
1418 return status;
1419 }
1420
1421 int
1422 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
1423 uint64_t l, enum nexus_wait_type type)
1424 {
1425 struct qla_qpair *qpair;
1426 struct qla_hw_data *ha = vha->hw;
1427 int i, status = QLA_SUCCESS;
1428
1429 status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
1430 type);
1431 for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
1432 qpair = ha->queue_pair_map[i];
1433 if (!qpair)
1434 continue;
1435 status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
1436 type);
1437 }
1438 return status;
1439 }
1440
1441 static char *reset_errors[] = {
1442 "HBA not online",
1443 "HBA not ready",
1444 "Task management failed",
1445 "Waiting for command completions",
1446 };
1447
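/*
 * SCSI EH: LUN reset handler.  Issues a LUN reset via the ISP ops and
 * then waits for all commands on that nexus to be returned.
 */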
1448 static int
1449 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1450 {
1451 struct scsi_device *sdev = cmd->device;
1452 scsi_qla_host_t *vha = shost_priv(sdev->host);
1453 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1454 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1455 struct qla_hw_data *ha = vha->hw;
1456 int err;
1457
1458 if (qla2x00_isp_reg_stat(ha)) {
1459 ql_log(ql_log_info, vha, 0x803e,
1460 "PCI/Register disconnect, exiting.\n");
1461 qla_pci_set_eeh_busy(vha);
1462 return FAILED;
1463 }
1464
1465 if (!fcport) {
1466 return FAILED;
1467 }
1468
1469 err = fc_block_rport(rport);
1470 if (err != 0)
1471 return err;
1472
1473 if (fcport->deleted)
1474 return FAILED;
1475
1476 ql_log(ql_log_info, vha, 0x8009,
1477 "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
1478 sdev->id, sdev->lun, cmd);
1479
1480 err = 0;
1481 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1482 ql_log(ql_log_warn, vha, 0x800a,
1483 "Wait for hba online failed for cmd=%p.\n", cmd);
1484 goto eh_reset_failed;
1485 }
1486 err = 2;
1487 if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
1488 != QLA_SUCCESS) {
1489 ql_log(ql_log_warn, vha, 0x800c,
1490 "do_reset failed for cmd=%p.\n", cmd);
1491 goto eh_reset_failed;
1492 }
1493 err = 3;
1494 if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
1495 cmd->device->lun,
1496 WAIT_LUN) != QLA_SUCCESS) {
1497 ql_log(ql_log_warn, vha, 0x800d,
1498 "wait for pending cmds failed for cmd=%p.\n", cmd);
1499 goto eh_reset_failed;
1500 }
1501
1502 ql_log(ql_log_info, vha, 0x800e,
1503 "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
1504 vha->host_no, sdev->id, sdev->lun, cmd);
1505
1506 return SUCCESS;
1507
1508 eh_reset_failed:
1509 ql_log(ql_log_info, vha, 0x800f,
1510 "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
1511 reset_errors[err], vha->host_no, sdev->id, sdev->lun,
1512 cmd);
1513 vha->reset_cmd_err_cnt++;
1514 return FAILED;
1515 }
1516
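/*
 * SCSI EH: target reset handler.  Issues a target reset via the ISP ops
 * and waits for all commands to that remote port to be returned.
 */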
1517 static int
1518 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
1519 {
1520 struct scsi_device *sdev = cmd->device;
1521 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1522 scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
1523 struct qla_hw_data *ha = vha->hw;
1524 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1525 int err;
1526
1527 if (qla2x00_isp_reg_stat(ha)) {
1528 ql_log(ql_log_info, vha, 0x803f,
1529 "PCI/Register disconnect, exiting.\n");
1530 qla_pci_set_eeh_busy(vha);
1531 return FAILED;
1532 }
1533
1534 if (!fcport) {
1535 return FAILED;
1536 }
1537
1538 err = fc_block_rport(rport);
1539 if (err != 0)
1540 return err;
1541
1542 if (fcport->deleted)
1543 return FAILED;
1544
1545 ql_log(ql_log_info, vha, 0x8009,
1546 "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
1547 sdev->id, cmd);
1548
1549 err = 0;
1550 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1551 ql_log(ql_log_warn, vha, 0x800a,
1552 "Wait for hba online failed for cmd=%p.\n", cmd);
1553 goto eh_reset_failed;
1554 }
1555 err = 2;
1556 if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
1557 ql_log(ql_log_warn, vha, 0x800c,
1558 "target_reset failed for cmd=%p.\n", cmd);
1559 goto eh_reset_failed;
1560 }
1561 err = 3;
1562 if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
1563 WAIT_TARGET) != QLA_SUCCESS) {
1564 ql_log(ql_log_warn, vha, 0x800d,
1565 "wait for pending cmds failed for cmd=%p.\n", cmd);
1566 goto eh_reset_failed;
1567 }
1568
1569 ql_log(ql_log_info, vha, 0x800e,
1570 "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
1571 vha->host_no, sdev->id, cmd);
1572
1573 return SUCCESS;
1574
1575 eh_reset_failed:
1576 ql_log(ql_log_info, vha, 0x800f,
1577 "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
1578 reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
1579 cmd);
1580 vha->reset_cmd_err_cnt++;
1581 return FAILED;
1582 }
1583
1584 /**************************************************************************
1585 * qla2xxx_eh_bus_reset
1586 *
1587 * Description:
1588 * The bus reset function will reset the bus and abort any executing
1589 * commands.
1590 *
1591 * Input:
1592 * cmd = Linux SCSI command packet of the command that cause the
1593 * bus reset.
1594 *
1595 * Returns:
1596 * SUCCESS/FAILURE (defined as macro in scsi.h).
1597 *
1598 **************************************************************************/
1599 static int
1600 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1601 {
1602 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1603 int ret = FAILED;
1604 unsigned int id;
1605 uint64_t lun;
1606 struct qla_hw_data *ha = vha->hw;
1607
1608 if (qla2x00_isp_reg_stat(ha)) {
1609 ql_log(ql_log_info, vha, 0x8040,
1610 "PCI/Register disconnect, exiting.\n");
1611 qla_pci_set_eeh_busy(vha);
1612 return FAILED;
1613 }
1614
1615 id = cmd->device->id;
1616 lun = cmd->device->lun;
1617
1618 if (qla2x00_chip_is_down(vha))
1619 return ret;
1620
1621 ql_log(ql_log_info, vha, 0x8012,
1622 "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1623
1624 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1625 ql_log(ql_log_fatal, vha, 0x8013,
1626 "Wait for hba online failed board disabled.\n");
1627 goto eh_bus_reset_done;
1628 }
1629
1630 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
1631 ret = SUCCESS;
1632
1633 if (ret == FAILED)
1634 goto eh_bus_reset_done;
1635
1636 /* Flush outstanding commands. */
1637 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1638 QLA_SUCCESS) {
1639 ql_log(ql_log_warn, vha, 0x8014,
1640 "Wait for pending commands failed.\n");
1641 ret = FAILED;
1642 }
1643
1644 eh_bus_reset_done:
1645 ql_log(ql_log_warn, vha, 0x802b,
1646 "BUS RESET %s nexus=%ld:%d:%llu.\n",
1647 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1648
1649 return ret;
1650 }
1651
1652 /**************************************************************************
1653 * qla2xxx_eh_host_reset
1654 *
1655 * Description:
1656 * The reset function will reset the Adapter.
1657 *
1658 * Input:
1659 * cmd = Linux SCSI command packet of the command that cause the
1660 * adapter reset.
1661 *
1662 * Returns:
1663 * Either SUCCESS or FAILED.
1664 *
1665 * Note:
1666 **************************************************************************/
1667 static int
1668 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1669 {
1670 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1671 struct qla_hw_data *ha = vha->hw;
1672 int ret = FAILED;
1673 unsigned int id;
1674 uint64_t lun;
1675 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1676
1677 if (qla2x00_isp_reg_stat(ha)) {
1678 ql_log(ql_log_info, vha, 0x8041,
1679 "PCI/Register disconnect, exiting.\n");
1680 qla_pci_set_eeh_busy(vha);
1681 return SUCCESS;
1682 }
1683
1684 id = cmd->device->id;
1685 lun = cmd->device->lun;
1686
1687 ql_log(ql_log_info, vha, 0x8018,
1688 "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1689
1690 /*
1691 * No point in issuing another reset if one is active. Also do not
1692 * attempt a reset if we are updating flash.
1693 */
1694 if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
1695 goto eh_host_reset_lock;
1696
1697 if (vha != base_vha) {
1698 if (qla2x00_vp_abort_isp(vha))
1699 goto eh_host_reset_lock;
1700 } else {
1701 if (IS_P3P_TYPE(vha->hw)) {
1702 if (!qla82xx_fcoe_ctx_reset(vha)) {
1703 /* Ctx reset success */
1704 ret = SUCCESS;
1705 goto eh_host_reset_lock;
1706 }
1707 /* fall thru if ctx reset failed */
1708 }
1709 if (ha->wq)
1710 flush_workqueue(ha->wq);
1711
1712 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1713 if (ha->isp_ops->abort_isp(base_vha)) {
1714 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1715 /* failed. schedule dpc to try */
1716 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1717
1718 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1719 ql_log(ql_log_warn, vha, 0x802a,
1720 "wait for hba online failed.\n");
1721 goto eh_host_reset_lock;
1722 }
1723 }
1724 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1725 }
1726
1727 /* Waiting for command to be returned to OS.*/
1728 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1729 QLA_SUCCESS)
1730 ret = SUCCESS;
1731
1732 eh_host_reset_lock:
1733 ql_log(ql_log_info, vha, 0x8017,
1734 "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
1735 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1736
1737 return ret;
1738 }
1739
1740 /*
1741 * qla2x00_loop_reset
1742 * Issue loop reset.
1743 *
1744 * Input:
1745 * ha = adapter block pointer.
1746 *
1747 * Returns:
1748 * 0 = success
1749 */
1750 int
1751 qla2x00_loop_reset(scsi_qla_host_t *vha)
1752 {
1753 int ret;
1754 struct qla_hw_data *ha = vha->hw;
1755
1756 if (IS_QLAFX00(ha))
1757 return QLA_SUCCESS;
1758
1759 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
1760 atomic_set(&vha->loop_state, LOOP_DOWN);
1761 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1762 qla2x00_mark_all_devices_lost(vha);
1763 ret = qla2x00_full_login_lip(vha);
1764 if (ret != QLA_SUCCESS) {
1765 ql_dbg(ql_dbg_taskm, vha, 0x802d,
1766 "full_login_lip=%d.\n", ret);
1767 }
1768 }
1769
1770 if (ha->flags.enable_lip_reset) {
1771 ret = qla2x00_lip_reset(vha);
1772 if (ret != QLA_SUCCESS)
1773 ql_dbg(ql_dbg_taskm, vha, 0x802e,
1774 "lip_reset failed (%d).\n", ret);
1775 }
1776
1777 /* Issue marker command only when we are going to start the I/O */
1778 vha->marker_needed = 1;
1779
1780 return QLA_SUCCESS;
1781 }
1782
1783 /*
1784 * The caller must ensure that no completion interrupts will happen
1785 * while this function is in progress.
1786 */
1787 static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
1788 unsigned long *flags)
1789 __releases(qp->qp_lock_ptr)
1790 __acquires(qp->qp_lock_ptr)
1791 {
1792 DECLARE_COMPLETION_ONSTACK(comp);
1793 scsi_qla_host_t *vha = qp->vha;
1794 struct qla_hw_data *ha = vha->hw;
1795 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1796 int rval;
1797 bool ret_cmd;
1798 uint32_t ratov_j;
1799
1800 lockdep_assert_held(qp->qp_lock_ptr);
1801
1802 if (qla2x00_chip_is_down(vha)) {
1803 sp->done(sp, res);
1804 return;
1805 }
1806
1807 if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
1808 (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
1809 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
1810 !qla2x00_isp_reg_stat(ha))) {
1811 if (sp->comp) {
1812 sp->done(sp, res);
1813 return;
1814 }
1815
1816 sp->comp = ∁
1817 spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
1818
1819 rval = ha->isp_ops->abort_command(sp);
1820 /* Wait for command completion. */
1821 ret_cmd = false;
1822 ratov_j = ha->r_a_tov/10 * 4 * 1000;
1823 ratov_j = msecs_to_jiffies(ratov_j);
1824 switch (rval) {
1825 case QLA_SUCCESS:
1826 if (wait_for_completion_timeout(&comp, ratov_j)) {
1827 ql_dbg(ql_dbg_taskm, vha, 0xffff,
1828 "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1829 __func__, ha->r_a_tov/10);
1830 ret_cmd = true;
1831 }
1832 			/* else FW returns SP to driver */
1833 break;
1834 default:
1835 ret_cmd = true;
1836 break;
1837 }
1838
1839 spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1840 switch (sp->type) {
1841 case SRB_SCSI_CMD:
1842 if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
1843 sp->done(sp, res);
1844 break;
1845 default:
1846 if (ret_cmd)
1847 sp->done(sp, res);
1848 break;
1849 }
1850 } else {
1851 sp->done(sp, res);
1852 }
1853 }
1854
1855 /*
1856 * The caller must ensure that no completion interrupts will happen
1857 * while this function is in progress.
1858 */
1859 static void
1860 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1861 {
1862 int cnt;
1863 unsigned long flags;
1864 srb_t *sp;
1865 scsi_qla_host_t *vha = qp->vha;
1866 struct qla_hw_data *ha = vha->hw;
1867 struct req_que *req;
1868 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1869 struct qla_tgt_cmd *cmd;
1870
1871 if (!ha->req_q_map)
1872 return;
1873 spin_lock_irqsave(qp->qp_lock_ptr, flags);
1874 req = qp->req;
1875 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1876 sp = req->outstanding_cmds[cnt];
1877 if (sp) {
1878 /*
1879 * perform lockless completion during driver unload
1880 */
1881 if (qla2x00_chip_is_down(vha)) {
1882 req->outstanding_cmds[cnt] = NULL;
1883 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
1884 sp->done(sp, res);
1885 spin_lock_irqsave(qp->qp_lock_ptr, flags);
1886 continue;
1887 }
1888
1889 switch (sp->cmd_type) {
1890 case TYPE_SRB:
1891 qla2x00_abort_srb(qp, sp, res, &flags);
1892 break;
1893 case TYPE_TGT_CMD:
1894 if (!vha->hw->tgt.tgt_ops || !tgt ||
1895 qla_ini_mode_enabled(vha)) {
1896 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
1897 "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1898 vha->dpc_flags);
1899 continue;
1900 }
1901 cmd = (struct qla_tgt_cmd *)sp;
1902 cmd->aborted = 1;
1903 break;
1904 case TYPE_TGT_TMCMD:
1905 /* Skip task management functions. */
1906 break;
1907 default:
1908 break;
1909 }
1910 req->outstanding_cmds[cnt] = NULL;
1911 }
1912 }
1913 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
1914 }
1915
1916 /*
1917 * The caller must ensure that no completion interrupts will happen
1918 * while this function is in progress.
1919 */
1920 void
1921 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1922 {
1923 int que;
1924 struct qla_hw_data *ha = vha->hw;
1925
1926 /* Continue only if initialization complete. */
1927 if (!ha->base_qpair)
1928 return;
1929 __qla2x00_abort_all_cmds(ha->base_qpair, res);
1930
1931 if (!ha->queue_pair_map)
1932 return;
1933 for (que = 0; que < ha->max_qpairs; que++) {
1934 if (!ha->queue_pair_map[que])
1935 continue;
1936
1937 __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
1938 }
1939 }
1940
1941 static int
1942 qla2xxx_slave_alloc(struct scsi_device *sdev)
1943 {
1944 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1945
1946 if (!rport || fc_remote_port_chkready(rport))
1947 return -ENXIO;
1948
1949 sdev->hostdata = *(fc_port_t **)rport->dd_data;
1950
1951 return 0;
1952 }
1953
1954 static int
1955 qla2xxx_slave_configure(struct scsi_device *sdev)
1956 {
1957 scsi_qla_host_t *vha = shost_priv(sdev->host);
1958 struct req_que *req = vha->req;
1959
1960 scsi_change_queue_depth(sdev, req->max_q_depth);
1961 return 0;
1962 }
1963
1964 static void
1965 qla2xxx_slave_destroy(struct scsi_device *sdev)
1966 {
1967 sdev->hostdata = NULL;
1968 }
1969
1970 /**
1971 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1972 * @ha: HA context
1973 *
1974  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1975  * supported addressing method.
1976 */
1977 static void
1978 qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1979 {
1980 /* Assume a 32bit DMA mask. */
1981 ha->flags.enable_64bit_addressing = 0;
1982
1983 if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1984 /* Any upper-dword bits set? */
1985 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1986 !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1987 /* Ok, a 64bit DMA mask is applicable. */
1988 ha->flags.enable_64bit_addressing = 1;
1989 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1990 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1991 return;
1992 }
1993 }
1994
1995 dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1996 dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1997 }
1998
1999 static void
2000 qla2x00_enable_intrs(struct qla_hw_data *ha)
2001 {
2002 unsigned long flags = 0;
2003 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2004
2005 spin_lock_irqsave(&ha->hardware_lock, flags);
2006 ha->interrupts_on = 1;
2007 /* enable risc and host interrupts */
2008 	wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
2009 	rd_reg_word(&reg->ictrl);
2010 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2011
2012 }
2013
2014 static void
2015 qla2x00_disable_intrs(struct qla_hw_data *ha)
2016 {
2017 unsigned long flags = 0;
2018 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2019
2020 spin_lock_irqsave(&ha->hardware_lock, flags);
2021 ha->interrupts_on = 0;
2022 /* disable risc and host interrupts */
2023 	wrt_reg_word(&reg->ictrl, 0);
2024 	rd_reg_word(&reg->ictrl);
2025 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2026 }
2027
2028 static void
2029 qla24xx_enable_intrs(struct qla_hw_data *ha)
2030 {
2031 unsigned long flags = 0;
2032 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2033
2034 spin_lock_irqsave(&ha->hardware_lock, flags);
2035 ha->interrupts_on = 1;
2036 	wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
2037 	rd_reg_dword(&reg->ictrl);
2038 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2039 }
2040
2041 static void
2042 qla24xx_disable_intrs(struct qla_hw_data *ha)
2043 {
2044 unsigned long flags = 0;
2045 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2046
2047 if (IS_NOPOLLING_TYPE(ha))
2048 return;
2049 spin_lock_irqsave(&ha->hardware_lock, flags);
2050 ha->interrupts_on = 0;
2051 	wrt_reg_dword(&reg->ictrl, 0);
2052 	rd_reg_dword(&reg->ictrl);
2053 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2054 }
2055
2056 static int
2057 qla2x00_iospace_config(struct qla_hw_data *ha)
2058 {
2059 resource_size_t pio;
2060 uint16_t msix;
2061
2062 if (pci_request_selected_regions(ha->pdev, ha->bars,
2063 QLA2XXX_DRIVER_NAME)) {
2064 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
2065 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2066 pci_name(ha->pdev));
2067 goto iospace_error_exit;
2068 }
2069 if (!(ha->bars & 1))
2070 goto skip_pio;
2071
2072 /* We only need PIO for Flash operations on ISP2312 v2 chips. */
2073 pio = pci_resource_start(ha->pdev, 0);
2074 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
2075 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2076 ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
2077 "Invalid pci I/O region size (%s).\n",
2078 pci_name(ha->pdev));
2079 pio = 0;
2080 }
2081 } else {
2082 ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
2083 "Region #0 no a PIO resource (%s).\n",
2084 pci_name(ha->pdev));
2085 pio = 0;
2086 }
2087 ha->pio_address = pio;
2088 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
2089 "PIO address=%llu.\n",
2090 (unsigned long long)ha->pio_address);
2091
2092 skip_pio:
2093 /* Use MMIO operations for all accesses. */
2094 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
2095 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
2096 "Region #1 not an MMIO resource (%s), aborting.\n",
2097 pci_name(ha->pdev));
2098 goto iospace_error_exit;
2099 }
2100 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
2101 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
2102 "Invalid PCI mem region size (%s), aborting.\n",
2103 pci_name(ha->pdev));
2104 goto iospace_error_exit;
2105 }
2106
2107 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
2108 if (!ha->iobase) {
2109 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
2110 "Cannot remap MMIO (%s), aborting.\n",
2111 pci_name(ha->pdev));
2112 goto iospace_error_exit;
2113 }
2114
2115 /* Determine queue resources */
2116 ha->max_req_queues = ha->max_rsp_queues = 1;
2117 ha->msix_count = QLA_BASE_VECTORS;
2118
2119 /* Check if FW supports MQ or not */
2120 if (!(ha->fw_attributes & BIT_6))
2121 goto mqiobase_exit;
2122
2123 if (!ql2xmqsupport || !ql2xnvmeenable ||
2124 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
2125 goto mqiobase_exit;
2126
2127 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
2128 pci_resource_len(ha->pdev, 3));
2129 if (ha->mqiobase) {
2130 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
2131 "MQIO Base=%p.\n", ha->mqiobase);
2132 /* Read MSIX vector size of the board */
2133 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
2134 ha->msix_count = msix + 1;
2135 /* Max queues are bounded by available msix vectors */
2136 /* MB interrupt uses 1 vector */
2137 ha->max_req_queues = ha->msix_count - 1;
2138 ha->max_rsp_queues = ha->max_req_queues;
2139 		/* Number of queue pairs is the max value minus the base queue pair */
2140 ha->max_qpairs = ha->max_rsp_queues - 1;
2141 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
2142 "Max no of queues pairs: %d.\n", ha->max_qpairs);
2143
2144 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
2145 "MSI-X vector count: %d.\n", ha->msix_count);
2146 } else
2147 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
2148 "BAR 3 not enabled.\n");
2149
2150 mqiobase_exit:
2151 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
2152 "MSIX Count: %d.\n", ha->msix_count);
2153 return (0);
2154
2155 iospace_error_exit:
2156 return (-ENOMEM);
2157 }
2158
2159
2160 static int
2161 qla83xx_iospace_config(struct qla_hw_data *ha)
2162 {
2163 uint16_t msix;
2164
2165 if (pci_request_selected_regions(ha->pdev, ha->bars,
2166 QLA2XXX_DRIVER_NAME)) {
2167 ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
2168 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2169 pci_name(ha->pdev));
2170
2171 goto iospace_error_exit;
2172 }
2173
2174 /* Use MMIO operations for all accesses. */
2175 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
2176 ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
2177 "Invalid pci I/O region size (%s).\n",
2178 pci_name(ha->pdev));
2179 goto iospace_error_exit;
2180 }
2181 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2182 ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
2183 "Invalid PCI mem region size (%s), aborting\n",
2184 pci_name(ha->pdev));
2185 goto iospace_error_exit;
2186 }
2187
2188 ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
2189 if (!ha->iobase) {
2190 ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
2191 "Cannot remap MMIO (%s), aborting.\n",
2192 pci_name(ha->pdev));
2193 goto iospace_error_exit;
2194 }
2195
2196 	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
2197 /* 83XX 26XX always use MQ type access for queues
2198 * - mbar 2, a.k.a region 4 */
2199 ha->max_req_queues = ha->max_rsp_queues = 1;
2200 ha->msix_count = QLA_BASE_VECTORS;
2201 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
2202 pci_resource_len(ha->pdev, 4));
2203
2204 if (!ha->mqiobase) {
2205 ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
2206 "BAR2/region4 not enabled\n");
2207 goto mqiobase_exit;
2208 }
2209
2210 ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
2211 pci_resource_len(ha->pdev, 2));
2212 if (ha->msixbase) {
2213 /* Read MSIX vector size of the board */
2214 pci_read_config_word(ha->pdev,
2215 QLA_83XX_PCI_MSIX_CONTROL, &msix);
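		/* The MSI-X Table Size field is N-1 encoded, hence the + 1 below. */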
2216 ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
2217 /*
2218 * By default, driver uses at least two msix vectors
2219 * (default & rspq)
2220 */
2221 if (ql2xmqsupport || ql2xnvmeenable) {
2222 /* MB interrupt uses 1 vector */
2223 ha->max_req_queues = ha->msix_count - 1;
2224
2225 /* ATIOQ needs 1 vector. That's 1 less QPair */
2226 if (QLA_TGT_MODE_ENABLED())
2227 ha->max_req_queues--;
2228
2229 ha->max_rsp_queues = ha->max_req_queues;
2230
2231 			/* Number of queue pairs is the max value minus
2232 * the base queue pair */
2233 ha->max_qpairs = ha->max_req_queues - 1;
2234 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
2235 "Max no of queues pairs: %d.\n", ha->max_qpairs);
2236 }
2237 ql_log_pci(ql_log_info, ha->pdev, 0x011c,
2238 "MSI-X vector count: %d.\n", ha->msix_count);
2239 } else
2240 ql_log_pci(ql_log_info, ha->pdev, 0x011e,
2241 "BAR 1 not enabled.\n");
2242
2243 mqiobase_exit:
2244 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
2245 "MSIX Count: %d.\n", ha->msix_count);
2246 return 0;
2247
2248 iospace_error_exit:
2249 return -ENOMEM;
2250 }
2251
2252 static struct isp_operations qla2100_isp_ops = {
2253 .pci_config = qla2100_pci_config,
2254 .reset_chip = qla2x00_reset_chip,
2255 .chip_diag = qla2x00_chip_diag,
2256 .config_rings = qla2x00_config_rings,
2257 .reset_adapter = qla2x00_reset_adapter,
2258 .nvram_config = qla2x00_nvram_config,
2259 .update_fw_options = qla2x00_update_fw_options,
2260 .load_risc = qla2x00_load_risc,
2261 .pci_info_str = qla2x00_pci_info_str,
2262 .fw_version_str = qla2x00_fw_version_str,
2263 .intr_handler = qla2100_intr_handler,
2264 .enable_intrs = qla2x00_enable_intrs,
2265 .disable_intrs = qla2x00_disable_intrs,
2266 .abort_command = qla2x00_abort_command,
2267 .target_reset = qla2x00_abort_target,
2268 .lun_reset = qla2x00_lun_reset,
2269 .fabric_login = qla2x00_login_fabric,
2270 .fabric_logout = qla2x00_fabric_logout,
2271 .calc_req_entries = qla2x00_calc_iocbs_32,
2272 .build_iocbs = qla2x00_build_scsi_iocbs_32,
2273 .prep_ms_iocb = qla2x00_prep_ms_iocb,
2274 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
2275 .read_nvram = qla2x00_read_nvram_data,
2276 .write_nvram = qla2x00_write_nvram_data,
2277 .fw_dump = qla2100_fw_dump,
2278 .beacon_on = NULL,
2279 .beacon_off = NULL,
2280 .beacon_blink = NULL,
2281 .read_optrom = qla2x00_read_optrom_data,
2282 .write_optrom = qla2x00_write_optrom_data,
2283 .get_flash_version = qla2x00_get_flash_version,
2284 .start_scsi = qla2x00_start_scsi,
2285 .start_scsi_mq = NULL,
2286 .abort_isp = qla2x00_abort_isp,
2287 .iospace_config = qla2x00_iospace_config,
2288 .initialize_adapter = qla2x00_initialize_adapter,
2289 };
2290
2291 static struct isp_operations qla2300_isp_ops = {
2292 .pci_config = qla2300_pci_config,
2293 .reset_chip = qla2x00_reset_chip,
2294 .chip_diag = qla2x00_chip_diag,
2295 .config_rings = qla2x00_config_rings,
2296 .reset_adapter = qla2x00_reset_adapter,
2297 .nvram_config = qla2x00_nvram_config,
2298 .update_fw_options = qla2x00_update_fw_options,
2299 .load_risc = qla2x00_load_risc,
2300 .pci_info_str = qla2x00_pci_info_str,
2301 .fw_version_str = qla2x00_fw_version_str,
2302 .intr_handler = qla2300_intr_handler,
2303 .enable_intrs = qla2x00_enable_intrs,
2304 .disable_intrs = qla2x00_disable_intrs,
2305 .abort_command = qla2x00_abort_command,
2306 .target_reset = qla2x00_abort_target,
2307 .lun_reset = qla2x00_lun_reset,
2308 .fabric_login = qla2x00_login_fabric,
2309 .fabric_logout = qla2x00_fabric_logout,
2310 .calc_req_entries = qla2x00_calc_iocbs_32,
2311 .build_iocbs = qla2x00_build_scsi_iocbs_32,
2312 .prep_ms_iocb = qla2x00_prep_ms_iocb,
2313 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
2314 .read_nvram = qla2x00_read_nvram_data,
2315 .write_nvram = qla2x00_write_nvram_data,
2316 .fw_dump = qla2300_fw_dump,
2317 .beacon_on = qla2x00_beacon_on,
2318 .beacon_off = qla2x00_beacon_off,
2319 .beacon_blink = qla2x00_beacon_blink,
2320 .read_optrom = qla2x00_read_optrom_data,
2321 .write_optrom = qla2x00_write_optrom_data,
2322 .get_flash_version = qla2x00_get_flash_version,
2323 .start_scsi = qla2x00_start_scsi,
2324 .start_scsi_mq = NULL,
2325 .abort_isp = qla2x00_abort_isp,
2326 .iospace_config = qla2x00_iospace_config,
2327 .initialize_adapter = qla2x00_initialize_adapter,
2328 };
2329
2330 static struct isp_operations qla24xx_isp_ops = {
2331 .pci_config = qla24xx_pci_config,
2332 .reset_chip = qla24xx_reset_chip,
2333 .chip_diag = qla24xx_chip_diag,
2334 .config_rings = qla24xx_config_rings,
2335 .reset_adapter = qla24xx_reset_adapter,
2336 .nvram_config = qla24xx_nvram_config,
2337 .update_fw_options = qla24xx_update_fw_options,
2338 .load_risc = qla24xx_load_risc,
2339 .pci_info_str = qla24xx_pci_info_str,
2340 .fw_version_str = qla24xx_fw_version_str,
2341 .intr_handler = qla24xx_intr_handler,
2342 .enable_intrs = qla24xx_enable_intrs,
2343 .disable_intrs = qla24xx_disable_intrs,
2344 .abort_command = qla24xx_abort_command,
2345 .target_reset = qla24xx_abort_target,
2346 .lun_reset = qla24xx_lun_reset,
2347 .fabric_login = qla24xx_login_fabric,
2348 .fabric_logout = qla24xx_fabric_logout,
2349 .calc_req_entries = NULL,
2350 .build_iocbs = NULL,
2351 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2352 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2353 .read_nvram = qla24xx_read_nvram_data,
2354 .write_nvram = qla24xx_write_nvram_data,
2355 .fw_dump = qla24xx_fw_dump,
2356 .beacon_on = qla24xx_beacon_on,
2357 .beacon_off = qla24xx_beacon_off,
2358 .beacon_blink = qla24xx_beacon_blink,
2359 .read_optrom = qla24xx_read_optrom_data,
2360 .write_optrom = qla24xx_write_optrom_data,
2361 .get_flash_version = qla24xx_get_flash_version,
2362 .start_scsi = qla24xx_start_scsi,
2363 .start_scsi_mq = NULL,
2364 .abort_isp = qla2x00_abort_isp,
2365 .iospace_config = qla2x00_iospace_config,
2366 .initialize_adapter = qla2x00_initialize_adapter,
2367 };
2368
2369 static struct isp_operations qla25xx_isp_ops = {
2370 .pci_config = qla25xx_pci_config,
2371 .reset_chip = qla24xx_reset_chip,
2372 .chip_diag = qla24xx_chip_diag,
2373 .config_rings = qla24xx_config_rings,
2374 .reset_adapter = qla24xx_reset_adapter,
2375 .nvram_config = qla24xx_nvram_config,
2376 .update_fw_options = qla24xx_update_fw_options,
2377 .load_risc = qla24xx_load_risc,
2378 .pci_info_str = qla24xx_pci_info_str,
2379 .fw_version_str = qla24xx_fw_version_str,
2380 .intr_handler = qla24xx_intr_handler,
2381 .enable_intrs = qla24xx_enable_intrs,
2382 .disable_intrs = qla24xx_disable_intrs,
2383 .abort_command = qla24xx_abort_command,
2384 .target_reset = qla24xx_abort_target,
2385 .lun_reset = qla24xx_lun_reset,
2386 .fabric_login = qla24xx_login_fabric,
2387 .fabric_logout = qla24xx_fabric_logout,
2388 .calc_req_entries = NULL,
2389 .build_iocbs = NULL,
2390 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2391 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2392 .read_nvram = qla25xx_read_nvram_data,
2393 .write_nvram = qla25xx_write_nvram_data,
2394 .fw_dump = qla25xx_fw_dump,
2395 .beacon_on = qla24xx_beacon_on,
2396 .beacon_off = qla24xx_beacon_off,
2397 .beacon_blink = qla24xx_beacon_blink,
2398 .read_optrom = qla25xx_read_optrom_data,
2399 .write_optrom = qla24xx_write_optrom_data,
2400 .get_flash_version = qla24xx_get_flash_version,
2401 .start_scsi = qla24xx_dif_start_scsi,
2402 .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
2403 .abort_isp = qla2x00_abort_isp,
2404 .iospace_config = qla2x00_iospace_config,
2405 .initialize_adapter = qla2x00_initialize_adapter,
2406 };
2407
2408 static struct isp_operations qla81xx_isp_ops = {
2409 .pci_config = qla25xx_pci_config,
2410 .reset_chip = qla24xx_reset_chip,
2411 .chip_diag = qla24xx_chip_diag,
2412 .config_rings = qla24xx_config_rings,
2413 .reset_adapter = qla24xx_reset_adapter,
2414 .nvram_config = qla81xx_nvram_config,
2415 .update_fw_options = qla24xx_update_fw_options,
2416 .load_risc = qla81xx_load_risc,
2417 .pci_info_str = qla24xx_pci_info_str,
2418 .fw_version_str = qla24xx_fw_version_str,
2419 .intr_handler = qla24xx_intr_handler,
2420 .enable_intrs = qla24xx_enable_intrs,
2421 .disable_intrs = qla24xx_disable_intrs,
2422 .abort_command = qla24xx_abort_command,
2423 .target_reset = qla24xx_abort_target,
2424 .lun_reset = qla24xx_lun_reset,
2425 .fabric_login = qla24xx_login_fabric,
2426 .fabric_logout = qla24xx_fabric_logout,
2427 .calc_req_entries = NULL,
2428 .build_iocbs = NULL,
2429 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2430 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2431 .read_nvram = NULL,
2432 .write_nvram = NULL,
2433 .fw_dump = qla81xx_fw_dump,
2434 .beacon_on = qla24xx_beacon_on,
2435 .beacon_off = qla24xx_beacon_off,
2436 .beacon_blink = qla83xx_beacon_blink,
2437 .read_optrom = qla25xx_read_optrom_data,
2438 .write_optrom = qla24xx_write_optrom_data,
2439 .get_flash_version = qla24xx_get_flash_version,
2440 .start_scsi = qla24xx_dif_start_scsi,
2441 .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
2442 .abort_isp = qla2x00_abort_isp,
2443 .iospace_config = qla2x00_iospace_config,
2444 .initialize_adapter = qla2x00_initialize_adapter,
2445 };
2446
2447 static struct isp_operations qla82xx_isp_ops = {
2448 .pci_config = qla82xx_pci_config,
2449 .reset_chip = qla82xx_reset_chip,
2450 .chip_diag = qla24xx_chip_diag,
2451 .config_rings = qla82xx_config_rings,
2452 .reset_adapter = qla24xx_reset_adapter,
2453 .nvram_config = qla81xx_nvram_config,
2454 .update_fw_options = qla24xx_update_fw_options,
2455 .load_risc = qla82xx_load_risc,
2456 .pci_info_str = qla24xx_pci_info_str,
2457 .fw_version_str = qla24xx_fw_version_str,
2458 .intr_handler = qla82xx_intr_handler,
2459 .enable_intrs = qla82xx_enable_intrs,
2460 .disable_intrs = qla82xx_disable_intrs,
2461 .abort_command = qla24xx_abort_command,
2462 .target_reset = qla24xx_abort_target,
2463 .lun_reset = qla24xx_lun_reset,
2464 .fabric_login = qla24xx_login_fabric,
2465 .fabric_logout = qla24xx_fabric_logout,
2466 .calc_req_entries = NULL,
2467 .build_iocbs = NULL,
2468 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2469 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2470 .read_nvram = qla24xx_read_nvram_data,
2471 .write_nvram = qla24xx_write_nvram_data,
2472 .fw_dump = qla82xx_fw_dump,
2473 .beacon_on = qla82xx_beacon_on,
2474 .beacon_off = qla82xx_beacon_off,
2475 .beacon_blink = NULL,
2476 .read_optrom = qla82xx_read_optrom_data,
2477 .write_optrom = qla82xx_write_optrom_data,
2478 .get_flash_version = qla82xx_get_flash_version,
2479 .start_scsi = qla82xx_start_scsi,
2480 .start_scsi_mq = NULL,
2481 .abort_isp = qla82xx_abort_isp,
2482 .iospace_config = qla82xx_iospace_config,
2483 .initialize_adapter = qla2x00_initialize_adapter,
2484 };
2485
2486 static struct isp_operations qla8044_isp_ops = {
2487 .pci_config = qla82xx_pci_config,
2488 .reset_chip = qla82xx_reset_chip,
2489 .chip_diag = qla24xx_chip_diag,
2490 .config_rings = qla82xx_config_rings,
2491 .reset_adapter = qla24xx_reset_adapter,
2492 .nvram_config = qla81xx_nvram_config,
2493 .update_fw_options = qla24xx_update_fw_options,
2494 .load_risc = qla82xx_load_risc,
2495 .pci_info_str = qla24xx_pci_info_str,
2496 .fw_version_str = qla24xx_fw_version_str,
2497 .intr_handler = qla8044_intr_handler,
2498 .enable_intrs = qla82xx_enable_intrs,
2499 .disable_intrs = qla82xx_disable_intrs,
2500 .abort_command = qla24xx_abort_command,
2501 .target_reset = qla24xx_abort_target,
2502 .lun_reset = qla24xx_lun_reset,
2503 .fabric_login = qla24xx_login_fabric,
2504 .fabric_logout = qla24xx_fabric_logout,
2505 .calc_req_entries = NULL,
2506 .build_iocbs = NULL,
2507 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2508 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2509 .read_nvram = NULL,
2510 .write_nvram = NULL,
2511 .fw_dump = qla8044_fw_dump,
2512 .beacon_on = qla82xx_beacon_on,
2513 .beacon_off = qla82xx_beacon_off,
2514 .beacon_blink = NULL,
2515 .read_optrom = qla8044_read_optrom_data,
2516 .write_optrom = qla8044_write_optrom_data,
2517 .get_flash_version = qla82xx_get_flash_version,
2518 .start_scsi = qla82xx_start_scsi,
2519 .start_scsi_mq = NULL,
2520 .abort_isp = qla8044_abort_isp,
2521 .iospace_config = qla82xx_iospace_config,
2522 .initialize_adapter = qla2x00_initialize_adapter,
2523 };
2524
2525 static struct isp_operations qla83xx_isp_ops = {
2526 .pci_config = qla25xx_pci_config,
2527 .reset_chip = qla24xx_reset_chip,
2528 .chip_diag = qla24xx_chip_diag,
2529 .config_rings = qla24xx_config_rings,
2530 .reset_adapter = qla24xx_reset_adapter,
2531 .nvram_config = qla81xx_nvram_config,
2532 .update_fw_options = qla24xx_update_fw_options,
2533 .load_risc = qla81xx_load_risc,
2534 .pci_info_str = qla24xx_pci_info_str,
2535 .fw_version_str = qla24xx_fw_version_str,
2536 .intr_handler = qla24xx_intr_handler,
2537 .enable_intrs = qla24xx_enable_intrs,
2538 .disable_intrs = qla24xx_disable_intrs,
2539 .abort_command = qla24xx_abort_command,
2540 .target_reset = qla24xx_abort_target,
2541 .lun_reset = qla24xx_lun_reset,
2542 .fabric_login = qla24xx_login_fabric,
2543 .fabric_logout = qla24xx_fabric_logout,
2544 .calc_req_entries = NULL,
2545 .build_iocbs = NULL,
2546 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2547 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2548 .read_nvram = NULL,
2549 .write_nvram = NULL,
2550 .fw_dump = qla83xx_fw_dump,
2551 .beacon_on = qla24xx_beacon_on,
2552 .beacon_off = qla24xx_beacon_off,
2553 .beacon_blink = qla83xx_beacon_blink,
2554 .read_optrom = qla25xx_read_optrom_data,
2555 .write_optrom = qla24xx_write_optrom_data,
2556 .get_flash_version = qla24xx_get_flash_version,
2557 .start_scsi = qla24xx_dif_start_scsi,
2558 .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
2559 .abort_isp = qla2x00_abort_isp,
2560 .iospace_config = qla83xx_iospace_config,
2561 .initialize_adapter = qla2x00_initialize_adapter,
2562 };
2563
2564 static struct isp_operations qlafx00_isp_ops = {
2565 .pci_config = qlafx00_pci_config,
2566 .reset_chip = qlafx00_soft_reset,
2567 .chip_diag = qlafx00_chip_diag,
2568 .config_rings = qlafx00_config_rings,
2569 .reset_adapter = qlafx00_soft_reset,
2570 .nvram_config = NULL,
2571 .update_fw_options = NULL,
2572 .load_risc = NULL,
2573 .pci_info_str = qlafx00_pci_info_str,
2574 .fw_version_str = qlafx00_fw_version_str,
2575 .intr_handler = qlafx00_intr_handler,
2576 .enable_intrs = qlafx00_enable_intrs,
2577 .disable_intrs = qlafx00_disable_intrs,
2578 .abort_command = qla24xx_async_abort_command,
2579 .target_reset = qlafx00_abort_target,
2580 .lun_reset = qlafx00_lun_reset,
2581 .fabric_login = NULL,
2582 .fabric_logout = NULL,
2583 .calc_req_entries = NULL,
2584 .build_iocbs = NULL,
2585 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2586 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2587 .read_nvram = qla24xx_read_nvram_data,
2588 .write_nvram = qla24xx_write_nvram_data,
2589 .fw_dump = NULL,
2590 .beacon_on = qla24xx_beacon_on,
2591 .beacon_off = qla24xx_beacon_off,
2592 .beacon_blink = NULL,
2593 .read_optrom = qla24xx_read_optrom_data,
2594 .write_optrom = qla24xx_write_optrom_data,
2595 .get_flash_version = qla24xx_get_flash_version,
2596 .start_scsi = qlafx00_start_scsi,
2597 .start_scsi_mq = NULL,
2598 .abort_isp = qlafx00_abort_isp,
2599 .iospace_config = qlafx00_iospace_config,
2600 .initialize_adapter = qlafx00_initialize_adapter,
2601 };
2602
2603 static struct isp_operations qla27xx_isp_ops = {
2604 .pci_config = qla25xx_pci_config,
2605 .reset_chip = qla24xx_reset_chip,
2606 .chip_diag = qla24xx_chip_diag,
2607 .config_rings = qla24xx_config_rings,
2608 .reset_adapter = qla24xx_reset_adapter,
2609 .nvram_config = qla81xx_nvram_config,
2610 .update_fw_options = qla24xx_update_fw_options,
2611 .load_risc = qla81xx_load_risc,
2612 .pci_info_str = qla24xx_pci_info_str,
2613 .fw_version_str = qla24xx_fw_version_str,
2614 .intr_handler = qla24xx_intr_handler,
2615 .enable_intrs = qla24xx_enable_intrs,
2616 .disable_intrs = qla24xx_disable_intrs,
2617 .abort_command = qla24xx_abort_command,
2618 .target_reset = qla24xx_abort_target,
2619 .lun_reset = qla24xx_lun_reset,
2620 .fabric_login = qla24xx_login_fabric,
2621 .fabric_logout = qla24xx_fabric_logout,
2622 .calc_req_entries = NULL,
2623 .build_iocbs = NULL,
2624 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2625 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2626 .read_nvram = NULL,
2627 .write_nvram = NULL,
2628 .fw_dump = qla27xx_fwdump,
2629 .mpi_fw_dump = qla27xx_mpi_fwdump,
2630 .beacon_on = qla24xx_beacon_on,
2631 .beacon_off = qla24xx_beacon_off,
2632 .beacon_blink = qla83xx_beacon_blink,
2633 .read_optrom = qla25xx_read_optrom_data,
2634 .write_optrom = qla24xx_write_optrom_data,
2635 .get_flash_version = qla24xx_get_flash_version,
2636 .start_scsi = qla24xx_dif_start_scsi,
2637 .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
2638 .abort_isp = qla2x00_abort_isp,
2639 .iospace_config = qla83xx_iospace_config,
2640 .initialize_adapter = qla2x00_initialize_adapter,
2641 };
2642
2643 static inline void
2644 qla2x00_set_isp_flags(struct qla_hw_data *ha)
2645 {
2646 ha->device_type = DT_EXTENDED_IDS;
2647 switch (ha->pdev->device) {
2648 case PCI_DEVICE_ID_QLOGIC_ISP2100:
2649 ha->isp_type |= DT_ISP2100;
2650 ha->device_type &= ~DT_EXTENDED_IDS;
2651 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2652 break;
2653 case PCI_DEVICE_ID_QLOGIC_ISP2200:
2654 ha->isp_type |= DT_ISP2200;
2655 ha->device_type &= ~DT_EXTENDED_IDS;
2656 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2657 break;
2658 case PCI_DEVICE_ID_QLOGIC_ISP2300:
2659 ha->isp_type |= DT_ISP2300;
2660 ha->device_type |= DT_ZIO_SUPPORTED;
2661 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2662 break;
2663 case PCI_DEVICE_ID_QLOGIC_ISP2312:
2664 ha->isp_type |= DT_ISP2312;
2665 ha->device_type |= DT_ZIO_SUPPORTED;
2666 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2667 break;
2668 case PCI_DEVICE_ID_QLOGIC_ISP2322:
2669 ha->isp_type |= DT_ISP2322;
2670 ha->device_type |= DT_ZIO_SUPPORTED;
2671 if (ha->pdev->subsystem_vendor == 0x1028 &&
2672 ha->pdev->subsystem_device == 0x0170)
2673 ha->device_type |= DT_OEM_001;
2674 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2675 break;
2676 case PCI_DEVICE_ID_QLOGIC_ISP6312:
2677 ha->isp_type |= DT_ISP6312;
2678 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2679 break;
2680 case PCI_DEVICE_ID_QLOGIC_ISP6322:
2681 ha->isp_type |= DT_ISP6322;
2682 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2683 break;
2684 case PCI_DEVICE_ID_QLOGIC_ISP2422:
2685 ha->isp_type |= DT_ISP2422;
2686 ha->device_type |= DT_ZIO_SUPPORTED;
2687 ha->device_type |= DT_FWI2;
2688 ha->device_type |= DT_IIDMA;
2689 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2690 break;
2691 case PCI_DEVICE_ID_QLOGIC_ISP2432:
2692 ha->isp_type |= DT_ISP2432;
2693 ha->device_type |= DT_ZIO_SUPPORTED;
2694 ha->device_type |= DT_FWI2;
2695 ha->device_type |= DT_IIDMA;
2696 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2697 break;
2698 case PCI_DEVICE_ID_QLOGIC_ISP8432:
2699 ha->isp_type |= DT_ISP8432;
2700 ha->device_type |= DT_ZIO_SUPPORTED;
2701 ha->device_type |= DT_FWI2;
2702 ha->device_type |= DT_IIDMA;
2703 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2704 break;
2705 case PCI_DEVICE_ID_QLOGIC_ISP5422:
2706 ha->isp_type |= DT_ISP5422;
2707 ha->device_type |= DT_FWI2;
2708 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2709 break;
2710 case PCI_DEVICE_ID_QLOGIC_ISP5432:
2711 ha->isp_type |= DT_ISP5432;
2712 ha->device_type |= DT_FWI2;
2713 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2714 break;
2715 case PCI_DEVICE_ID_QLOGIC_ISP2532:
2716 ha->isp_type |= DT_ISP2532;
2717 ha->device_type |= DT_ZIO_SUPPORTED;
2718 ha->device_type |= DT_FWI2;
2719 ha->device_type |= DT_IIDMA;
2720 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2721 break;
2722 case PCI_DEVICE_ID_QLOGIC_ISP8001:
2723 ha->isp_type |= DT_ISP8001;
2724 ha->device_type |= DT_ZIO_SUPPORTED;
2725 ha->device_type |= DT_FWI2;
2726 ha->device_type |= DT_IIDMA;
2727 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2728 break;
2729 case PCI_DEVICE_ID_QLOGIC_ISP8021:
2730 ha->isp_type |= DT_ISP8021;
2731 ha->device_type |= DT_ZIO_SUPPORTED;
2732 ha->device_type |= DT_FWI2;
2733 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2734 /* Initialize 82XX ISP flags */
2735 qla82xx_init_flags(ha);
2736 break;
2737 case PCI_DEVICE_ID_QLOGIC_ISP8044:
2738 ha->isp_type |= DT_ISP8044;
2739 ha->device_type |= DT_ZIO_SUPPORTED;
2740 ha->device_type |= DT_FWI2;
2741 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2742 /* Initialize 82XX ISP flags */
2743 qla82xx_init_flags(ha);
2744 break;
2745 case PCI_DEVICE_ID_QLOGIC_ISP2031:
2746 ha->isp_type |= DT_ISP2031;
2747 ha->device_type |= DT_ZIO_SUPPORTED;
2748 ha->device_type |= DT_FWI2;
2749 ha->device_type |= DT_IIDMA;
2750 ha->device_type |= DT_T10_PI;
2751 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2752 break;
2753 case PCI_DEVICE_ID_QLOGIC_ISP8031:
2754 ha->isp_type |= DT_ISP8031;
2755 ha->device_type |= DT_ZIO_SUPPORTED;
2756 ha->device_type |= DT_FWI2;
2757 ha->device_type |= DT_IIDMA;
2758 ha->device_type |= DT_T10_PI;
2759 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2760 break;
2761 case PCI_DEVICE_ID_QLOGIC_ISPF001:
2762 ha->isp_type |= DT_ISPFX00;
2763 break;
2764 case PCI_DEVICE_ID_QLOGIC_ISP2071:
2765 ha->isp_type |= DT_ISP2071;
2766 ha->device_type |= DT_ZIO_SUPPORTED;
2767 ha->device_type |= DT_FWI2;
2768 ha->device_type |= DT_IIDMA;
2769 ha->device_type |= DT_T10_PI;
2770 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2771 break;
2772 case PCI_DEVICE_ID_QLOGIC_ISP2271:
2773 ha->isp_type |= DT_ISP2271;
2774 ha->device_type |= DT_ZIO_SUPPORTED;
2775 ha->device_type |= DT_FWI2;
2776 ha->device_type |= DT_IIDMA;
2777 ha->device_type |= DT_T10_PI;
2778 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2779 break;
2780 case PCI_DEVICE_ID_QLOGIC_ISP2261:
2781 ha->isp_type |= DT_ISP2261;
2782 ha->device_type |= DT_ZIO_SUPPORTED;
2783 ha->device_type |= DT_FWI2;
2784 ha->device_type |= DT_IIDMA;
2785 ha->device_type |= DT_T10_PI;
2786 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2787 break;
2788 case PCI_DEVICE_ID_QLOGIC_ISP2081:
2789 case PCI_DEVICE_ID_QLOGIC_ISP2089:
2790 ha->isp_type |= DT_ISP2081;
2791 ha->device_type |= DT_ZIO_SUPPORTED;
2792 ha->device_type |= DT_FWI2;
2793 ha->device_type |= DT_IIDMA;
2794 ha->device_type |= DT_T10_PI;
2795 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2796 break;
2797 case PCI_DEVICE_ID_QLOGIC_ISP2281:
2798 case PCI_DEVICE_ID_QLOGIC_ISP2289:
2799 ha->isp_type |= DT_ISP2281;
2800 ha->device_type |= DT_ZIO_SUPPORTED;
2801 ha->device_type |= DT_FWI2;
2802 ha->device_type |= DT_IIDMA;
2803 ha->device_type |= DT_T10_PI;
2804 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2805 break;
2806 }
2807
2808 if (IS_QLA82XX(ha))
2809 ha->port_no = ha->portnum & 1;
2810 else {
2811 /* Get adapter physical port no from interrupt pin register. */
2812 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
2813 if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
2814 IS_QLA27XX(ha) || IS_QLA28XX(ha))
2815 ha->port_no--;
2816 else
2817 ha->port_no = !(ha->port_no & 1);
2818 }
2819
2820 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
2821 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
2822 ha->device_type, ha->port_no, ha->fw_srisc_address);
2823 }
2824
2825 static void
2826 qla2xxx_scan_start(struct Scsi_Host *shost)
2827 {
2828 scsi_qla_host_t *vha = shost_priv(shost);
2829
2830 if (vha->hw->flags.running_gold_fw)
2831 return;
2832
2833 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2834 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2835 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2836 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
2837 }
2838
2839 static int
2840 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
2841 {
2842 scsi_qla_host_t *vha = shost_priv(shost);
2843
2844 if (test_bit(UNLOADING, &vha->dpc_flags))
2845 return 1;
2846 if (!vha->host)
2847 return 1;
2848 if (time > vha->hw->loop_reset_delay * HZ)
2849 return 1;
2850
2851 return atomic_read(&vha->loop_state) == LOOP_READY;
2852 }
2853
2854 static void qla_heartbeat_work_fn(struct work_struct *work)
2855 {
2856 struct qla_hw_data *ha = container_of(work,
2857 struct qla_hw_data, heartbeat_work);
2858 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2859
2860 if (!ha->flags.mbox_busy && base_vha->flags.init_done)
2861 qla_no_op_mb(base_vha);
2862 }
2863
2864 static void qla2x00_iocb_work_fn(struct work_struct *work)
2865 {
2866 struct scsi_qla_host *vha = container_of(work,
2867 struct scsi_qla_host, iocb_work);
2868 struct qla_hw_data *ha = vha->hw;
2869 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2870 int i = 2;
2871 unsigned long flags;
2872
2873 if (test_bit(UNLOADING, &base_vha->dpc_flags))
2874 return;
2875
2876 while (!list_empty(&vha->work_list) && i > 0) {
2877 qla2x00_do_work(vha);
2878 i--;
2879 }
2880
2881 spin_lock_irqsave(&vha->work_lock, flags);
2882 clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
2883 spin_unlock_irqrestore(&vha->work_lock, flags);
2884 }
2885
2886 static void
2887 qla_trace_init(void)
2888 {
2889 qla_trc_array = trace_array_get_by_name("qla2xxx", NULL);
2890 if (!qla_trc_array) {
2891 ql_log(ql_log_fatal, NULL, 0x0001,
2892 "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
2893 return;
2894 }
2895
2896 QLA_TRACE_ENABLE(qla_trc_array);
2897 }
2898
2899 static void
2900 qla_trace_uninit(void)
2901 {
2902 if (!qla_trc_array)
2903 return;
2904 trace_array_put(qla_trc_array);
2905 }
2906
2907 /*
2908 * PCI driver interface
2909 */
2910 static int
2911 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2912 {
2913 int ret = -ENODEV;
2914 struct Scsi_Host *host;
2915 scsi_qla_host_t *base_vha = NULL;
2916 struct qla_hw_data *ha;
2917 char pci_info[30];
2918 char fw_str[30], wq_name[30];
2919 struct scsi_host_template *sht;
2920 int bars, mem_only = 0;
2921 uint16_t req_length = 0, rsp_length = 0;
2922 struct req_que *req = NULL;
2923 struct rsp_que *rsp = NULL;
2924 int i;
2925
2926 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
2927 sht = &qla2xxx_driver_template;
2928 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
2929 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
2930 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
2931 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
2932 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
2933 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
2934 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
2935 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
2936 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2937 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2938 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2939 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2940 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
2941 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
2942 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
2943 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
2944 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
2945 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
2946 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
2947 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2948 mem_only = 1;
2949 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2950 "Mem only adapter.\n");
2951 }
2952 ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2953 "Bars=%d.\n", bars);
2954
2955 if (mem_only) {
2956 if (pci_enable_device_mem(pdev))
2957 return ret;
2958 } else {
2959 if (pci_enable_device(pdev))
2960 return ret;
2961 }
2962
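	/* Presumably to conserve the limited memory of a kdump (crash capture) kernel: skip MQ and fw dump allocation. */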
2963 if (is_kdump_kernel()) {
2964 ql2xmqsupport = 0;
2965 ql2xallocfwdump = 0;
2966 }
2967
2968 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
2969 if (!ha) {
2970 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2971 "Unable to allocate memory for ha.\n");
2972 goto disable_device;
2973 }
2974 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2975 "Memory allocated for ha=%p.\n", ha);
2976 ha->pdev = pdev;
2977 INIT_LIST_HEAD(&ha->tgt.q_full_list);
2978 spin_lock_init(&ha->tgt.q_full_lock);
2979 spin_lock_init(&ha->tgt.sess_lock);
2980 spin_lock_init(&ha->tgt.atio_lock);
2981
2982 spin_lock_init(&ha->sadb_lock);
2983 INIT_LIST_HEAD(&ha->sadb_tx_index_list);
2984 INIT_LIST_HEAD(&ha->sadb_rx_index_list);
2985
2986 spin_lock_init(&ha->sadb_fp_lock);
2987
2988 if (qla_edif_sadb_build_free_pool(ha)) {
2989 kfree(ha);
2990 goto disable_device;
2991 }
2992
2993 atomic_set(&ha->nvme_active_aen_cnt, 0);
2994
2995 /* Clear our data area */
2996 ha->bars = bars;
2997 ha->mem_only = mem_only;
2998 spin_lock_init(&ha->hardware_lock);
2999 spin_lock_init(&ha->vport_slock);
3000 mutex_init(&ha->selflogin_lock);
3001 mutex_init(&ha->optrom_mutex);
3002
3003 /* Set ISP-type information. */
3004 qla2x00_set_isp_flags(ha);
3005
3006 /* Set EEH reset type to fundamental if required by hba */
3007 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
3008 IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
3009 pdev->needs_freset = 1;
3010
3011 ha->prev_topology = 0;
3012 ha->init_cb_size = sizeof(init_cb_t);
3013 ha->link_data_rate = PORT_SPEED_UNKNOWN;
3014 ha->optrom_size = OPTROM_SIZE_2300;
3015 ha->max_exchg = FW_MAX_EXCHANGES_CNT;
3016 atomic_set(&ha->num_pend_mbx_stage1, 0);
3017 atomic_set(&ha->num_pend_mbx_stage2, 0);
3018 atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
3019 ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
3020 INIT_LIST_HEAD(&ha->tmf_pending);
3021 INIT_LIST_HEAD(&ha->tmf_active);
3022
3023 /* Assign ISP specific operations. */
3024 if (IS_QLA2100(ha)) {
3025 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
3026 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
3027 req_length = REQUEST_ENTRY_CNT_2100;
3028 rsp_length = RESPONSE_ENTRY_CNT_2100;
3029 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
3030 ha->gid_list_info_size = 4;
3031 ha->flash_conf_off = ~0;
3032 ha->flash_data_off = ~0;
3033 ha->nvram_conf_off = ~0;
3034 ha->nvram_data_off = ~0;
3035 ha->isp_ops = &qla2100_isp_ops;
3036 } else if (IS_QLA2200(ha)) {
3037 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
3038 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
3039 req_length = REQUEST_ENTRY_CNT_2200;
3040 rsp_length = RESPONSE_ENTRY_CNT_2100;
3041 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
3042 ha->gid_list_info_size = 4;
3043 ha->flash_conf_off = ~0;
3044 ha->flash_data_off = ~0;
3045 ha->nvram_conf_off = ~0;
3046 ha->nvram_data_off = ~0;
3047 ha->isp_ops = &qla2100_isp_ops;
3048 } else if (IS_QLA23XX(ha)) {
3049 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
3050 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3051 req_length = REQUEST_ENTRY_CNT_2200;
3052 rsp_length = RESPONSE_ENTRY_CNT_2300;
3053 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3054 ha->gid_list_info_size = 6;
3055 if (IS_QLA2322(ha) || IS_QLA6322(ha))
3056 ha->optrom_size = OPTROM_SIZE_2322;
3057 ha->flash_conf_off = ~0;
3058 ha->flash_data_off = ~0;
3059 ha->nvram_conf_off = ~0;
3060 ha->nvram_data_off = ~0;
3061 ha->isp_ops = &qla2300_isp_ops;
3062 } else if (IS_QLA24XX_TYPE(ha)) {
3063 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3064 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3065 req_length = REQUEST_ENTRY_CNT_24XX;
3066 rsp_length = RESPONSE_ENTRY_CNT_2300;
3067 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3068 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3069 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
3070 ha->gid_list_info_size = 8;
3071 ha->optrom_size = OPTROM_SIZE_24XX;
3072 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
3073 ha->isp_ops = &qla24xx_isp_ops;
3074 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3075 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3076 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3077 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3078 } else if (IS_QLA25XX(ha)) {
3079 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3080 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3081 req_length = REQUEST_ENTRY_CNT_24XX;
3082 rsp_length = RESPONSE_ENTRY_CNT_2300;
3083 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3084 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3085 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
3086 ha->gid_list_info_size = 8;
3087 ha->optrom_size = OPTROM_SIZE_25XX;
3088 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3089 ha->isp_ops = &qla25xx_isp_ops;
3090 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3091 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3092 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3093 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3094 } else if (IS_QLA81XX(ha)) {
3095 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3096 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3097 req_length = REQUEST_ENTRY_CNT_24XX;
3098 rsp_length = RESPONSE_ENTRY_CNT_2300;
3099 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3100 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3101 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3102 ha->gid_list_info_size = 8;
3103 ha->optrom_size = OPTROM_SIZE_81XX;
3104 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3105 ha->isp_ops = &qla81xx_isp_ops;
3106 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3107 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3108 ha->nvram_conf_off = ~0;
3109 ha->nvram_data_off = ~0;
3110 } else if (IS_QLA82XX(ha)) {
3111 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3112 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3113 req_length = REQUEST_ENTRY_CNT_82XX;
3114 rsp_length = RESPONSE_ENTRY_CNT_82XX;
3115 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3116 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3117 ha->gid_list_info_size = 8;
3118 ha->optrom_size = OPTROM_SIZE_82XX;
3119 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3120 ha->isp_ops = &qla82xx_isp_ops;
3121 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3122 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3123 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3124 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3125 } else if (IS_QLA8044(ha)) {
3126 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3127 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3128 req_length = REQUEST_ENTRY_CNT_82XX;
3129 rsp_length = RESPONSE_ENTRY_CNT_82XX;
3130 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3131 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3132 ha->gid_list_info_size = 8;
3133 ha->optrom_size = OPTROM_SIZE_83XX;
3134 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3135 ha->isp_ops = &qla8044_isp_ops;
3136 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3137 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3138 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3139 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3140 } else if (IS_QLA83XX(ha)) {
3141 ha->portnum = PCI_FUNC(ha->pdev->devfn);
3142 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3143 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3144 req_length = REQUEST_ENTRY_CNT_83XX;
3145 rsp_length = RESPONSE_ENTRY_CNT_83XX;
3146 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3147 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3148 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3149 ha->gid_list_info_size = 8;
3150 ha->optrom_size = OPTROM_SIZE_83XX;
3151 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3152 ha->isp_ops = &qla83xx_isp_ops;
3153 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3154 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3155 ha->nvram_conf_off = ~0;
3156 ha->nvram_data_off = ~0;
3157 } else if (IS_QLAFX00(ha)) {
3158 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
3159 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
3160 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
3161 req_length = REQUEST_ENTRY_CNT_FX00;
3162 rsp_length = RESPONSE_ENTRY_CNT_FX00;
3163 ha->isp_ops = &qlafx00_isp_ops;
3164 ha->port_down_retry_count = 30; /* default value */
3165 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
3166 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
3167 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
3168 ha->mr.fw_hbt_en = 1;
3169 ha->mr.host_info_resend = false;
3170 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
3171 } else if (IS_QLA27XX(ha)) {
3172 ha->portnum = PCI_FUNC(ha->pdev->devfn);
3173 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3174 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3175 req_length = REQUEST_ENTRY_CNT_83XX;
3176 rsp_length = RESPONSE_ENTRY_CNT_83XX;
3177 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3178 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3179 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3180 ha->gid_list_info_size = 8;
3181 ha->optrom_size = OPTROM_SIZE_83XX;
3182 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3183 ha->isp_ops = &qla27xx_isp_ops;
3184 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3185 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3186 ha->nvram_conf_off = ~0;
3187 ha->nvram_data_off = ~0;
3188 } else if (IS_QLA28XX(ha)) {
3189 ha->portnum = PCI_FUNC(ha->pdev->devfn);
3190 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3191 ha->mbx_count = MAILBOX_REGISTER_COUNT;
3192 req_length = REQUEST_ENTRY_CNT_83XX;
3193 rsp_length = RESPONSE_ENTRY_CNT_83XX;
3194 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3195 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3196 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3197 ha->gid_list_info_size = 8;
3198 ha->optrom_size = OPTROM_SIZE_28XX;
3199 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3200 ha->isp_ops = &qla27xx_isp_ops;
3201 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
3202 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
3203 ha->nvram_conf_off = ~0;
3204 ha->nvram_data_off = ~0;
3205 }
3206
3207 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
3208 "mbx_count=%d, req_length=%d, "
3209 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
3210 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
3211 "max_fibre_devices=%d.\n",
3212 ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
3213 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
3214 ha->nvram_npiv_size, ha->max_fibre_devices);
3215 ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
3216 "isp_ops=%p, flash_conf_off=%d, "
3217 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
3218 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
3219 ha->nvram_conf_off, ha->nvram_data_off);
3220
3221 /* Configure PCI I/O space */
3222 ret = ha->isp_ops->iospace_config(ha);
3223 if (ret)
3224 goto iospace_config_failed;
3225
3226 ql_log_pci(ql_log_info, pdev, 0x001d,
3227 "Found an ISP%04X irq %d iobase 0x%p.\n",
3228 pdev->device, pdev->irq, ha->iobase);
3229 mutex_init(&ha->vport_lock);
3230 mutex_init(&ha->mq_lock);
3231 init_completion(&ha->mbx_cmd_comp);
3232 complete(&ha->mbx_cmd_comp);
3233 init_completion(&ha->mbx_intr_comp);
3234 init_completion(&ha->dcbx_comp);
3235 init_completion(&ha->lb_portup_comp);
3236
3237 set_bit(0, (unsigned long *) ha->vp_idx_map);
3238
3239 qla2x00_config_dma_addressing(ha);
3240 ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
3241 "64 Bit addressing is %s.\n",
3242 	    ha->flags.enable_64bit_addressing ? "enabled" :
3243 	    "disabled");
3244 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
3245 if (ret) {
3246 ql_log_pci(ql_log_fatal, pdev, 0x0031,
3247 "Failed to allocate memory for adapter, aborting.\n");
3248
3249 goto probe_hw_failed;
3250 }
3251
3252 req->max_q_depth = MAX_Q_DEPTH;
3253 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
3254 req->max_q_depth = ql2xmaxqdepth;
3255
3256
3257 base_vha = qla2x00_create_host(sht, ha);
3258 if (!base_vha) {
3259 ret = -ENOMEM;
3260 goto probe_hw_failed;
3261 }
3262
3263 pci_set_drvdata(pdev, base_vha);
3264 set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
3265
3266 host = base_vha->host;
3267 base_vha->req = req;
3268 if (IS_QLA2XXX_MIDTYPE(ha))
3269 base_vha->mgmt_svr_loop_id =
3270 qla2x00_reserve_mgmt_server_loop_id(base_vha);
3271 else
3272 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
3273 base_vha->vp_idx;
3274
3275 /* Setup fcport template structure. */
3276 ha->mr.fcport.vha = base_vha;
3277 ha->mr.fcport.port_type = FCT_UNKNOWN;
3278 ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
3279 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
3280 ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
3281 ha->mr.fcport.scan_state = 1;
3282
3283 qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
3284 QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
3285 QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);
3286
3287 /* Set the SG table size based on ISP type */
3288 if (!IS_FWI2_CAPABLE(ha)) {
3289 if (IS_QLA2100(ha))
3290 host->sg_tablesize = 32;
3291 } else {
3292 if (!IS_QLA82XX(ha))
3293 host->sg_tablesize = QLA_SG_ALL;
3294 }
3295 host->max_id = ha->max_fibre_devices;
3296 host->cmd_per_lun = 3;
3297 host->unique_id = host->host_no;
3298
3299 if (ql2xenabledif && ql2xenabledif != 2) {
3300 ql_log(ql_log_warn, base_vha, 0x302d,
3301 "Invalid value for ql2xenabledif, resetting it to default (2)\n");
3302 ql2xenabledif = 2;
3303 }
3304
3305 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
3306 host->max_cmd_len = 32;
3307 else
3308 host->max_cmd_len = MAX_CMDSZ;
3309 host->max_channel = MAX_BUSES - 1;
3310 /* Older HBAs support only 16-bit LUNs */
3311 if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
3312 ql2xmaxlun > 0xffff)
3313 host->max_lun = 0xffff;
3314 else
3315 host->max_lun = ql2xmaxlun;
3316 host->transportt = qla2xxx_transport_template;
3317 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
3318
3319 ql_dbg(ql_dbg_init, base_vha, 0x0033,
3320 "max_id=%d this_id=%d "
3321 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
3322 "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
3323 host->this_id, host->cmd_per_lun, host->unique_id,
3324 host->max_cmd_len, host->max_channel, host->max_lun,
3325 host->transportt, sht->vendor_id);
3326
3327 INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);
3328
3329 /* Set up the irqs */
3330 ret = qla2x00_request_irqs(ha, rsp);
3331 if (ret)
3332 goto probe_failed;
3333
3334 /* Alloc arrays of request and response ring ptrs */
3335 ret = qla2x00_alloc_queues(ha, req, rsp);
3336 if (ret) {
3337 ql_log(ql_log_fatal, base_vha, 0x003d,
3338 "Failed to allocate memory for queue pointers..."
3339 "aborting.\n");
3340 ret = -ENODEV;
3341 goto probe_failed;
3342 }
3343
3344 if (ha->mqenable) {
3345 /* number of hardware queues supported by blk/scsi-mq*/
3346 host->nr_hw_queues = ha->max_qpairs;
3347
3348 ql_dbg(ql_dbg_init, base_vha, 0x0192,
3349 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
3350 } else {
3351 if (ql2xnvmeenable) {
3352 host->nr_hw_queues = ha->max_qpairs;
3353 ql_dbg(ql_dbg_init, base_vha, 0x0194,
3354 "FC-NVMe support is enabled, HW queues=%d\n",
3355 host->nr_hw_queues);
3356 } else {
3357 ql_dbg(ql_dbg_init, base_vha, 0x0193,
3358 "blk/scsi-mq disabled.\n");
3359 }
3360 }
3361
3362 qlt_probe_one_stage1(base_vha, ha);
3363
3364 pci_save_state(pdev);
3365
3366 /* Assign back pointers */
3367 rsp->req = req;
3368 req->rsp = rsp;
3369
3370 if (IS_QLAFX00(ha)) {
3371 ha->rsp_q_map[0] = rsp;
3372 ha->req_q_map[0] = req;
3373 set_bit(0, ha->req_qid_map);
3374 set_bit(0, ha->rsp_qid_map);
3375 }
3376
3377 /* FWI2-capable only. */
3378 req->req_q_in = &ha->iobase->isp24.req_q_in;
3379 req->req_q_out = &ha->iobase->isp24.req_q_out;
3380 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
3381 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
3382 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3383 IS_QLA28XX(ha)) {
3384 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
3385 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
3386 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
3387 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
3388 }
3389
3390 if (IS_QLAFX00(ha)) {
3391 req->req_q_in = &ha->iobase->ispfx00.req_q_in;
3392 req->req_q_out = &ha->iobase->ispfx00.req_q_out;
3393 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
3394 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
3395 }
3396
3397 if (IS_P3P_TYPE(ha)) {
3398 req->req_q_out = &ha->iobase->isp82.req_q_out[0];
3399 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
3400 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
3401 }
3402
3403 ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
3404 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
3405 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
3406 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
3407 "req->req_q_in=%p req->req_q_out=%p "
3408 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
3409 req->req_q_in, req->req_q_out,
3410 rsp->rsp_q_in, rsp->rsp_q_out);
3411 ql_dbg(ql_dbg_init, base_vha, 0x003e,
3412 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
3413 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
3414 ql_dbg(ql_dbg_init, base_vha, 0x003f,
3415 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
3416 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
3417
3418 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
3419 if (unlikely(!ha->wq)) {
3420 ret = -ENOMEM;
3421 goto probe_failed;
3422 }
3423
3424 if (ha->isp_ops->initialize_adapter(base_vha)) {
3425 ql_log(ql_log_fatal, base_vha, 0x00d6,
3426 "Failed to initialize adapter - Adapter flags %x.\n",
3427 base_vha->device_flags);
3428
3429 if (IS_QLA82XX(ha)) {
3430 qla82xx_idc_lock(ha);
3431 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3432 QLA8XXX_DEV_FAILED);
3433 qla82xx_idc_unlock(ha);
3434 ql_log(ql_log_fatal, base_vha, 0x00d7,
3435 "HW State: FAILED.\n");
3436 } else if (IS_QLA8044(ha)) {
3437 qla8044_idc_lock(ha);
3438 qla8044_wr_direct(base_vha,
3439 QLA8044_CRB_DEV_STATE_INDEX,
3440 QLA8XXX_DEV_FAILED);
3441 qla8044_idc_unlock(ha);
3442 ql_log(ql_log_fatal, base_vha, 0x0150,
3443 "HW State: FAILED.\n");
3444 }
3445
3446 ret = -ENODEV;
3447 goto probe_failed;
3448 }
3449
3450 if (IS_QLAFX00(ha))
3451 host->can_queue = QLAFX00_MAX_CANQUEUE;
3452 else
3453 host->can_queue = req->num_outstanding_cmds - 10;
3454
3455 ql_dbg(ql_dbg_init, base_vha, 0x0032,
3456 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
3457 host->can_queue, base_vha->req,
3458 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3459
3460 /* Check if FW supports MQ or not for ISP25xx */
3461 if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6))
3462 ha->mqenable = 0;
3463
3464 if (ha->mqenable) {
3465 bool startit = false;
3466
3467 if (QLA_TGT_MODE_ENABLED())
3468 startit = false;
3469
3470 if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
3471 startit = true;
3472
3473 /* Create start of day qpairs for Block MQ */
3474 for (i = 0; i < ha->max_qpairs; i++)
3475 qla2xxx_create_qpair(base_vha, 5, 0, startit);
3476 }
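	/* Set up per-adapter/per-queue IOCB resource accounting (see ql2xenforce_iocb_limit). */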
3477 qla_init_iocb_limit(base_vha);
3478
3479 if (ha->flags.running_gold_fw)
3480 goto skip_dpc;
3481
3482 /*
3483 * Startup the kernel thread for this host adapter
3484 */
3485 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
3486 "%s_dpc", base_vha->host_str);
3487 if (IS_ERR(ha->dpc_thread)) {
3488 ql_log(ql_log_fatal, base_vha, 0x00ed,
3489 "Failed to start DPC thread.\n");
3490 ret = PTR_ERR(ha->dpc_thread);
3491 ha->dpc_thread = NULL;
3492 goto probe_failed;
3493 }
3494 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
3495 "DPC thread started successfully.\n");
3496
3497 /*
3498 * If we're not coming up in initiator mode, we might sit for
3499 * a while without waking up the dpc thread, which leads to a
3500 * stuck process warning. So just kick the dpc once here and
3501 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
3502 */
3503 qla2xxx_wake_dpc(base_vha);
3504
3505 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
3506
3507 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
3508 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
3509 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
3510 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
3511
3512 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
3513 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
3514 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
3515 INIT_WORK(&ha->idc_state_handler,
3516 qla83xx_idc_state_handler_work);
3517 INIT_WORK(&ha->nic_core_unrecoverable,
3518 qla83xx_nic_core_unrecoverable_work);
3519 }
3520
3521 skip_dpc:
3522 list_add_tail(&base_vha->list, &ha->vp_list);
3523 base_vha->host->irq = ha->pdev->irq;
3524
3525 	/* Initialize the timer */
3526 qla2x00_start_timer(base_vha, WATCH_INTERVAL);
3527 ql_dbg(ql_dbg_init, base_vha, 0x00ef,
3528 "Started qla2x00_timer with "
3529 "interval=%d.\n", WATCH_INTERVAL);
3530 ql_dbg(ql_dbg_init, base_vha, 0x00f0,
3531 "Detected hba at address=%p.\n",
3532 ha);
3533
3534 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
3535 if (ha->fw_attributes & BIT_4) {
3536 int prot = 0, guard;
3537
3538 base_vha->flags.difdix_supported = 1;
3539 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
3540 "Registering for DIF/DIX type 1 and 3 protection.\n");
3541 if (ql2xprotmask)
3542 scsi_host_set_prot(host, ql2xprotmask);
3543 else
3544 scsi_host_set_prot(host,
3545 prot | SHOST_DIF_TYPE1_PROTECTION
3546 | SHOST_DIF_TYPE2_PROTECTION
3547 | SHOST_DIF_TYPE3_PROTECTION
3548 | SHOST_DIX_TYPE1_PROTECTION
3549 | SHOST_DIX_TYPE2_PROTECTION
3550 | SHOST_DIX_TYPE3_PROTECTION);
3551
3552 guard = SHOST_DIX_GUARD_CRC;
3553
3554 if (IS_PI_IPGUARD_CAPABLE(ha) &&
3555 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
3556 guard |= SHOST_DIX_GUARD_IP;
3557
3558 if (ql2xprotguard)
3559 scsi_host_set_guard(host, ql2xprotguard);
3560 else
3561 scsi_host_set_guard(host, guard);
3562 } else
3563 base_vha->flags.difdix_supported = 0;
3564 }
3565
3566 ha->isp_ops->enable_intrs(ha);
3567
3568 if (IS_QLAFX00(ha)) {
3569 ret = qlafx00_fx_disc(base_vha,
3570 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
3571 host->sg_tablesize = (ha->mr.extended_io_enabled) ?
3572 QLA_SG_ALL : 128;
3573 }
3574
3575 if (IS_T10_PI_CAPABLE(base_vha->hw))
3576 host->dma_alignment = 0x7;
3577
3578 ret = scsi_add_host(host, &pdev->dev);
3579 if (ret)
3580 goto probe_failed;
3581
3582 base_vha->flags.init_done = 1;
3583 base_vha->flags.online = 1;
3584 ha->prev_minidump_failed = 0;
3585
3586 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
3587 "Init done and hba is online.\n");
3588
3589 if (qla_ini_mode_enabled(base_vha) ||
3590 qla_dual_mode_enabled(base_vha))
3591 scsi_scan_host(host);
3592 else
3593 ql_log(ql_log_info, base_vha, 0x0122,
3594 "skipping scsi_scan_host() for non-initiator port\n");
3595
3596 qla2x00_alloc_sysfs_attr(base_vha);
3597
3598 if (IS_QLAFX00(ha)) {
3599 ret = qlafx00_fx_disc(base_vha,
3600 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
3601
3602 /* Register system information */
3603 ret = qlafx00_fx_disc(base_vha,
3604 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
3605 }
3606
3607 qla2x00_init_host_attr(base_vha);
3608
3609 qla2x00_dfs_setup(base_vha);
3610
3611 ql_log(ql_log_info, base_vha, 0x00fb,
3612 "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
3613 ql_log(ql_log_info, base_vha, 0x00fc,
3614 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
3615 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
3616 sizeof(pci_info)),
3617 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
3618 base_vha->host_no,
3619 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
3620
3621 qlt_add_target(ha, base_vha);
3622
3623 clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
3624
3625 if (test_bit(UNLOADING, &base_vha->dpc_flags))
3626 return -ENODEV;
3627
3628 return 0;
3629
3630 probe_failed:
3631 qla_enode_stop(base_vha);
3632 qla_edb_stop(base_vha);
3633 vfree(base_vha->scan.l);
3634 if (base_vha->gnl.l) {
3635 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3636 base_vha->gnl.l, base_vha->gnl.ldma);
3637 base_vha->gnl.l = NULL;
3638 }
3639
3640 if (base_vha->timer_active)
3641 qla2x00_stop_timer(base_vha);
3642 base_vha->flags.online = 0;
3643 if (ha->dpc_thread) {
3644 struct task_struct *t = ha->dpc_thread;
3645
3646 ha->dpc_thread = NULL;
3647 kthread_stop(t);
3648 }
3649
3650 qla2x00_free_device(base_vha);
3651 scsi_host_put(base_vha->host);
3652 /*
3653 * Need to NULL out local req/rsp after
3654 * qla2x00_free_device => qla2x00_free_queues frees
3655 * what these are pointing to. Or else we'll
3656 * fall over below in qla2x00_free_req/rsp_que.
3657 */
3658 req = NULL;
3659 rsp = NULL;
3660
3661 probe_hw_failed:
3662 qla2x00_mem_free(ha);
3663 qla2x00_free_req_que(ha, req);
3664 qla2x00_free_rsp_que(ha, rsp);
3665 qla2x00_clear_drv_active(ha);
3666
3667 iospace_config_failed:
3668 if (IS_P3P_TYPE(ha)) {
3669 if (!ha->nx_pcibase)
3670 iounmap((device_reg_t *)ha->nx_pcibase);
3671 if (!ql2xdbwr)
3672 iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3673 } else {
3674 if (ha->iobase)
3675 iounmap(ha->iobase);
3676 if (ha->cregbase)
3677 iounmap(ha->cregbase);
3678 }
3679 pci_release_selected_regions(ha->pdev, ha->bars);
3680 kfree(ha);
3681
3682 disable_device:
3683 pci_disable_device(pdev);
3684 return ret;
3685 }
3686
3687 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
3688 {
3689 scsi_qla_host_t *vp;
3690 unsigned long flags;
3691 struct qla_hw_data *ha;
3692
3693 if (!base_vha)
3694 return;
3695
3696 ha = base_vha->hw;
3697
3698 spin_lock_irqsave(&ha->vport_slock, flags);
3699 list_for_each_entry(vp, &ha->vp_list, list)
3700 set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
3701
3702 /*
3703 * Indicate device removal to prevent future board_disable
3704 * and wait until any pending board_disable has completed.
3705 */
3706 set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
3707 spin_unlock_irqrestore(&ha->vport_slock, flags);
3708 }
3709
3710 static void
3711 qla2x00_shutdown(struct pci_dev *pdev)
3712 {
3713 scsi_qla_host_t *vha;
3714 struct qla_hw_data *ha;
3715
3716 vha = pci_get_drvdata(pdev);
3717 ha = vha->hw;
3718
3719 ql_log(ql_log_info, vha, 0xfffa,
3720 "Adapter shutdown\n");
3721
3722 /*
3723 * Prevent future board_disable and wait
3724 * until any pending board_disable has completed.
3725 */
3726 __qla_set_remove_flag(vha);
3727 cancel_work_sync(&ha->board_disable);
3728
3729 if (!atomic_read(&pdev->enable_cnt))
3730 return;
3731
3732 /* Notify ISPFX00 firmware */
3733 if (IS_QLAFX00(ha))
3734 qlafx00_driver_shutdown(vha, 20);
3735
3736 /* Turn-off FCE trace */
3737 if (ha->flags.fce_enabled) {
3738 qla2x00_disable_fce_trace(vha, NULL, NULL);
3739 ha->flags.fce_enabled = 0;
3740 }
3741
3742 /* Turn-off EFT trace */
3743 if (ha->eft)
3744 qla2x00_disable_eft_trace(vha);
3745
3746 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3747 IS_QLA28XX(ha)) {
3748 if (ha->flags.fw_started)
3749 qla2x00_abort_isp_cleanup(vha);
3750 } else {
3751 /* Stop currently executing firmware. */
3752 qla2x00_try_to_stop_firmware(vha);
3753 }
3754
3755 /* Disable timer */
3756 if (vha->timer_active)
3757 qla2x00_stop_timer(vha);
3758
3759 /* Turn adapter off line */
3760 vha->flags.online = 0;
3761
3762 /* turn-off interrupts on the card */
3763 if (ha->interrupts_on) {
3764 vha->flags.init_done = 0;
3765 ha->isp_ops->disable_intrs(ha);
3766 }
3767
3768 qla2x00_free_irqs(vha);
3769
3770 qla2x00_free_fw_dump(ha);
3771
3772 pci_disable_device(pdev);
3773 ql_log(ql_log_info, vha, 0xfffe,
3774 "Adapter shutdown successfully.\n");
3775 }
3776
3777 /* Deletes all the virtual ports for a given ha */
3778 static void
3779 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
3780 {
3781 scsi_qla_host_t *vha;
3782 unsigned long flags;
3783
3784 mutex_lock(&ha->vport_lock);
3785 while (ha->cur_vport_count) {
3786 spin_lock_irqsave(&ha->vport_slock, flags);
3787
3788 BUG_ON(base_vha->list.next == &ha->vp_list);
3789 /* This assumes first entry in ha->vp_list is always base vha */
3790 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
3791 scsi_host_get(vha->host);
3792
3793 spin_unlock_irqrestore(&ha->vport_slock, flags);
3794 mutex_unlock(&ha->vport_lock);
3795
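		/*
		 * Both locks are dropped here because the vport teardown
		 * calls below can sleep.
		 */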
3796 qla_nvme_delete(vha);
3797
3798 fc_vport_terminate(vha->fc_vport);
3799 scsi_host_put(vha->host);
3800
3801 mutex_lock(&ha->vport_lock);
3802 }
3803 mutex_unlock(&ha->vport_lock);
3804 }
3805
3806 /* Stops all deferred work threads */
3807 static void
3808 qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
3809 {
3810 /* Cancel all work and destroy DPC workqueues */
3811 if (ha->dpc_lp_wq) {
3812 cancel_work_sync(&ha->idc_aen);
3813 destroy_workqueue(ha->dpc_lp_wq);
3814 ha->dpc_lp_wq = NULL;
3815 }
3816
3817 if (ha->dpc_hp_wq) {
3818 cancel_work_sync(&ha->nic_core_reset);
3819 cancel_work_sync(&ha->idc_state_handler);
3820 cancel_work_sync(&ha->nic_core_unrecoverable);
3821 destroy_workqueue(ha->dpc_hp_wq);
3822 ha->dpc_hp_wq = NULL;
3823 }
3824
3825 /* Kill the kernel thread for this host */
3826 if (ha->dpc_thread) {
3827 struct task_struct *t = ha->dpc_thread;
3828
3829 /*
3830 * qla2xxx_wake_dpc checks for ->dpc_thread
3831 * so we need to zero it out.
3832 */
3833 ha->dpc_thread = NULL;
3834 kthread_stop(t);
3835 }
3836 }
3837
3838 static void
3839 qla2x00_unmap_iobases(struct qla_hw_data *ha)
3840 {
3841 if (IS_QLA82XX(ha)) {
3842
3843 iounmap((device_reg_t *)ha->nx_pcibase);
3844 if (!ql2xdbwr)
3845 iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3846 } else {
3847 if (ha->iobase)
3848 iounmap(ha->iobase);
3849
3850 if (ha->cregbase)
3851 iounmap(ha->cregbase);
3852
3853 if (ha->mqiobase)
3854 iounmap(ha->mqiobase);
3855
3856 if (ha->msixbase)
3857 iounmap(ha->msixbase);
3858 }
3859 }
3860
3861 static void
3862 qla2x00_clear_drv_active(struct qla_hw_data *ha)
3863 {
3864 if (IS_QLA8044(ha)) {
3865 qla8044_idc_lock(ha);
3866 qla8044_clear_drv_active(ha);
3867 qla8044_idc_unlock(ha);
3868 } else if (IS_QLA82XX(ha)) {
3869 qla82xx_idc_lock(ha);
3870 qla82xx_clear_drv_active(ha);
3871 qla82xx_idc_unlock(ha);
3872 }
3873 }
3874
3875 static void
3876 qla2x00_remove_one(struct pci_dev *pdev)
3877 {
3878 scsi_qla_host_t *base_vha;
3879 struct qla_hw_data *ha;
3880
3881 base_vha = pci_get_drvdata(pdev);
3882 ha = base_vha->hw;
3883 ql_log(ql_log_info, base_vha, 0xb079,
3884 "Removing driver\n");
3885 __qla_set_remove_flag(base_vha);
3886 cancel_work_sync(&ha->board_disable);
3887
3888 /*
3889 * If the PCI device is disabled then there was a PCI-disconnect and
3890 * qla2x00_disable_board_on_pci_error has taken care of most of the
3891 * resources.
3892 */
3893 if (!atomic_read(&pdev->enable_cnt)) {
3894 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3895 base_vha->gnl.l, base_vha->gnl.ldma);
3896 base_vha->gnl.l = NULL;
3897 scsi_host_put(base_vha->host);
3898 kfree(ha);
3899 pci_set_drvdata(pdev, NULL);
3900 return;
3901 }
3902 qla2x00_wait_for_hba_ready(base_vha);
3903
3904 /*
3905 	 * If the UNLOADING flag is already set, the unload is already being
3906 	 * handled by the path that set it first, so return here.
3907 */
3908 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
3909 return;
3910
3911 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3912 IS_QLA28XX(ha)) {
3913 if (ha->flags.fw_started)
3914 qla2x00_abort_isp_cleanup(base_vha);
3915 } else if (!IS_QLAFX00(ha)) {
3916 if (IS_QLA8031(ha)) {
3917 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
3918 "Clearing fcoe driver presence.\n");
3919 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
3920 ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
3921 "Error while clearing DRV-Presence.\n");
3922 }
3923
3924 qla2x00_try_to_stop_firmware(base_vha);
3925 }
3926
3927 qla2x00_wait_for_sess_deletion(base_vha);
3928
3929 qla_nvme_delete(base_vha);
3930
3931 dma_free_coherent(&ha->pdev->dev,
3932 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
3933
3934 base_vha->gnl.l = NULL;
3935 qla_enode_stop(base_vha);
3936 qla_edb_stop(base_vha);
3937
3938 vfree(base_vha->scan.l);
3939
3940 if (IS_QLAFX00(ha))
3941 qlafx00_driver_shutdown(base_vha, 20);
3942
3943 qla2x00_delete_all_vps(ha, base_vha);
3944
3945 qla2x00_dfs_remove(base_vha);
3946
3947 qla84xx_put_chip(base_vha);
3948
3949 /* Disable timer */
3950 if (base_vha->timer_active)
3951 qla2x00_stop_timer(base_vha);
3952
3953 base_vha->flags.online = 0;
3954
3955 /* free DMA memory */
3956 if (ha->exlogin_buf)
3957 qla2x00_free_exlogin_buffer(ha);
3958
3959 /* free DMA memory */
3960 if (ha->exchoffld_buf)
3961 qla2x00_free_exchoffld_buffer(ha);
3962
3963 qla2x00_destroy_deferred_work(ha);
3964
3965 qlt_remove_target(ha, base_vha);
3966
3967 qla2x00_free_sysfs_attr(base_vha, true);
3968
3969 fc_remove_host(base_vha->host);
3970
3971 scsi_remove_host(base_vha->host);
3972
3973 qla2x00_free_device(base_vha);
3974
3975 qla2x00_clear_drv_active(ha);
3976
3977 scsi_host_put(base_vha->host);
3978
3979 qla2x00_unmap_iobases(ha);
3980
3981 pci_release_selected_regions(ha->pdev, ha->bars);
3982 kfree(ha);
3983
3984 pci_disable_device(pdev);
3985 }
3986
3987 static inline void
3988 qla24xx_free_purex_list(struct purex_list *list)
3989 {
3990 struct purex_item *item, *next;
3991 ulong flags;
3992
3993 spin_lock_irqsave(&list->lock, flags);
3994 list_for_each_entry_safe(item, next, &list->head, list) {
3995 list_del(&item->list);
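		/* The default item is embedded in the vha; never kfree() it. */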
3996 if (item == &item->vha->default_item)
3997 continue;
3998 kfree(item);
3999 }
4000 spin_unlock_irqrestore(&list->lock, flags);
4001 }
4002
4003 static void
4004 qla2x00_free_device(scsi_qla_host_t *vha)
4005 {
4006 struct qla_hw_data *ha = vha->hw;
4007
4008 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
4009
4010 /* Disable timer */
4011 if (vha->timer_active)
4012 qla2x00_stop_timer(vha);
4013
4014 qla25xx_delete_queues(vha);
4015 vha->flags.online = 0;
4016
4017 /* turn-off interrupts on the card */
4018 if (ha->interrupts_on) {
4019 vha->flags.init_done = 0;
4020 ha->isp_ops->disable_intrs(ha);
4021 }
4022
4023 qla2x00_free_fcports(vha);
4024
4025 qla2x00_free_irqs(vha);
4026
4027 /* Flush the work queue and remove it */
4028 if (ha->wq) {
4029 destroy_workqueue(ha->wq);
4030 ha->wq = NULL;
4031 }
4032
4033
4034 qla24xx_free_purex_list(&vha->purex_list);
4035
4036 qla2x00_mem_free(ha);
4037
4038 qla82xx_md_free(vha);
4039
4040 qla_edif_sadb_release_free_pool(ha);
4041 qla_edif_sadb_release(ha);
4042
4043 qla2x00_free_queues(ha);
4044 }
4045
4046 void qla2x00_free_fcports(struct scsi_qla_host *vha)
4047 {
4048 fc_port_t *fcport, *tfcport;
4049
4050 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
4051 qla2x00_free_fcport(fcport);
4052 }
4053
4054 static inline void
4055 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
4056 {
4057 int now;
4058
4059 if (!fcport->rport)
4060 return;
4061
4062 if (fcport->rport) {
4063 ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
4064 "%s %8phN. rport %p roles %x\n",
4065 __func__, fcport->port_name, fcport->rport,
4066 fcport->rport->roles);
4067 fc_remote_port_delete(fcport->rport);
4068 }
4069 qlt_do_generation_tick(vha, &now);
4070 }
4071
4072 /*
4073 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
4074 *
4075 * Input: ha = adapter block pointer. fcport = port structure pointer.
4076 *
4077 * Return: None.
4078 *
4079 * Context:
4080 */
4081 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
4082 int do_login)
4083 {
4084 if (IS_QLAFX00(vha->hw)) {
4085 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4086 qla2x00_schedule_rport_del(vha, fcport);
4087 return;
4088 }
4089
4090 if (atomic_read(&fcport->state) == FCS_ONLINE &&
4091 vha->vp_idx == fcport->vha->vp_idx) {
4092 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4093 qla2x00_schedule_rport_del(vha, fcport);
4094 }
4095
4096 /*
4097 * We may need to retry the login, so don't change the state of the
4098 * port but do the retries.
4099 */
4100 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
4101 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4102
4103 if (!do_login)
4104 return;
4105
4106 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4107 }
4108
4109 void
4110 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
4111 {
4112 fc_port_t *fcport;
4113
4114 ql_dbg(ql_dbg_disc, vha, 0x20f1,
4115 "Mark all dev lost\n");
4116
4117 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4118 if (ql2xfc2target &&
4119 fcport->loop_id != FC_NO_LOOP_ID &&
4120 (fcport->flags & FCF_FCP2_DEVICE) &&
4121 fcport->port_type == FCT_TARGET &&
4122 !qla2x00_reset_active(vha)) {
4123 ql_dbg(ql_dbg_disc, vha, 0x211a,
4124 "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
4125 fcport->flags, fcport->port_type,
4126 fcport->d_id.b24, fcport->port_name);
4127 continue;
4128 }
4129 fcport->scan_state = 0;
4130 qlt_schedule_sess_for_deletion(fcport);
4131 }
4132 }
4133
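/* Mark loop IDs reserved for SNS, the management server and broadcast on non-FWI2 adapters. */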
4134 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
4135 {
4136 int i;
4137
4138 if (IS_FWI2_CAPABLE(ha))
4139 return;
4140
4141 for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
4142 set_bit(i, ha->loop_id_map);
4143 set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
4144 set_bit(BROADCAST, ha->loop_id_map);
4145 }
4146
4147 /*
4148 * qla2x00_mem_alloc
4149 * Allocates adapter memory.
4150 *
4151 * Returns:
4152 * 0 = success.
4153 * !0 = failure.
4154 */
4155 static int
4156 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
4157 struct req_que **req, struct rsp_que **rsp)
4158 {
4159 char name[16];
4160 int rc;
4161
4162 if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) {
4163 ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL);
4164 if (!ha->vp_map)
4165 goto fail;
4166 }
4167
4168 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
4169 &ha->init_cb_dma, GFP_KERNEL);
4170 if (!ha->init_cb)
4171 goto fail_free_vp_map;
4172
4173 rc = btree_init32(&ha->host_map);
4174 if (rc)
4175 goto fail_free_init_cb;
4176
4177 if (qlt_mem_alloc(ha) < 0)
4178 goto fail_free_btree;
4179
4180 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
4181 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
4182 if (!ha->gid_list)
4183 goto fail_free_tgt_mem;
4184
4185 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
4186 if (!ha->srb_mempool)
4187 goto fail_free_gid_list;
4188
4189 if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
4190 /* Allocate cache for CT6 Ctx. */
4191 if (!ctx_cachep) {
4192 ctx_cachep = kmem_cache_create("qla2xxx_ctx",
4193 sizeof(struct ct6_dsd), 0,
4194 SLAB_HWCACHE_ALIGN, NULL);
4195 if (!ctx_cachep)
4196 goto fail_free_srb_mempool;
4197 }
4198 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
4199 ctx_cachep);
4200 if (!ha->ctx_mempool)
4201 goto fail_free_srb_mempool;
4202 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
4203 "ctx_cachep=%p ctx_mempool=%p.\n",
4204 ctx_cachep, ha->ctx_mempool);
4205 }
4206
4207 /* Get memory for cached NVRAM */
4208 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
4209 if (!ha->nvram)
4210 goto fail_free_ctx_mempool;
4211
4212 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
4213 ha->pdev->device);
4214 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4215 DMA_POOL_SIZE, 8, 0);
4216 if (!ha->s_dma_pool)
4217 goto fail_free_nvram;
4218
4219 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
4220 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
4221 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
4222
4223 if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
4224 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4225 DSD_LIST_DMA_POOL_SIZE, 8, 0);
4226 if (!ha->dl_dma_pool) {
4227 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
4228 "Failed to allocate memory for dl_dma_pool.\n");
4229 goto fail_s_dma_pool;
4230 }
4231
4232 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4233 FCP_CMND_DMA_POOL_SIZE, 8, 0);
4234 if (!ha->fcp_cmnd_dma_pool) {
4235 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
4236 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
4237 goto fail_dl_dma_pool;
4238 }
4239
4240 if (ql2xenabledif) {
4241 u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
4242 struct dsd_dma *dsd, *nxt;
4243 uint i;
4244 			/* Create a DMA pool of buffers for DIF bundling */
4245 ha->dif_bundl_pool = dma_pool_create(name,
4246 &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
4247 if (!ha->dif_bundl_pool) {
4248 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4249 "%s: failed create dif_bundl_pool\n",
4250 __func__);
4251 goto fail_dif_bundl_dma_pool;
4252 }
4253
4254 INIT_LIST_HEAD(&ha->pool.good.head);
4255 INIT_LIST_HEAD(&ha->pool.unusable.head);
4256 ha->pool.good.count = 0;
4257 ha->pool.unusable.count = 0;
4258 for (i = 0; i < 128; i++) {
4259 dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
4260 if (!dsd) {
4261 ql_dbg_pci(ql_dbg_init, ha->pdev,
4262 0xe0ee, "%s: failed alloc dsd\n",
4263 __func__);
4264 return -ENOMEM;
4265 }
4266 ha->dif_bundle_kallocs++;
4267
4268 dsd->dsd_addr = dma_pool_alloc(
4269 ha->dif_bundl_pool, GFP_ATOMIC,
4270 &dsd->dsd_list_dma);
4271 if (!dsd->dsd_addr) {
4272 ql_dbg_pci(ql_dbg_init, ha->pdev,
4273 0xe0ee,
4274 "%s: failed alloc ->dsd_addr\n",
4275 __func__);
4276 kfree(dsd);
4277 ha->dif_bundle_kallocs--;
4278 continue;
4279 }
4280 ha->dif_bundle_dma_allocs++;
4281
4282 /*
4283 * if DMA buffer crosses 4G boundary,
4284 * put it on bad list
4285 */
4286 if (MSD(dsd->dsd_list_dma) ^
4287 MSD(dsd->dsd_list_dma + bufsize)) {
4288 list_add_tail(&dsd->list,
4289 &ha->pool.unusable.head);
4290 ha->pool.unusable.count++;
4291 } else {
4292 list_add_tail(&dsd->list,
4293 &ha->pool.good.head);
4294 ha->pool.good.count++;
4295 }
4296 }
4297
4298 /* return the good ones back to the pool */
4299 list_for_each_entry_safe(dsd, nxt,
4300 &ha->pool.good.head, list) {
4301 list_del(&dsd->list);
4302 dma_pool_free(ha->dif_bundl_pool,
4303 dsd->dsd_addr, dsd->dsd_list_dma);
4304 ha->dif_bundle_dma_allocs--;
4305 kfree(dsd);
4306 ha->dif_bundle_kallocs--;
4307 }
4308
4309 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4310 "%s: dif dma pool (good=%u unusable=%u)\n",
4311 __func__, ha->pool.good.count,
4312 ha->pool.unusable.count);
4313 }
4314
4315 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
4316 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
4317 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
4318 ha->dif_bundl_pool);
4319 }
4320
4321 /* Allocate memory for SNS commands */
4322 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4323 /* Get consistent memory allocated for SNS commands */
4324 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
4325 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
4326 if (!ha->sns_cmd)
4327 goto fail_dma_pool;
4328 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
4329 "sns_cmd: %p.\n", ha->sns_cmd);
4330 } else {
4331 /* Get consistent memory allocated for MS IOCB */
4332 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4333 &ha->ms_iocb_dma);
4334 if (!ha->ms_iocb)
4335 goto fail_dma_pool;
4336 /* Get consistent memory allocated for CT SNS commands */
4337 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
4338 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
4339 if (!ha->ct_sns)
4340 goto fail_free_ms_iocb;
4341 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
4342 "ms_iocb=%p ct_sns=%p.\n",
4343 ha->ms_iocb, ha->ct_sns);
4344 }
4345
4346 /* Allocate memory for request ring */
4347 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
4348 if (!*req) {
4349 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
4350 "Failed to allocate memory for req.\n");
4351 goto fail_req;
4352 }
4353 (*req)->length = req_len;
4354 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
4355 ((*req)->length + 1) * sizeof(request_t),
4356 &(*req)->dma, GFP_KERNEL);
4357 if (!(*req)->ring) {
4358 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
4359 "Failed to allocate memory for req_ring.\n");
4360 goto fail_req_ring;
4361 }
4362 /* Allocate memory for response ring */
4363 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
4364 if (!*rsp) {
4365 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
4366 "Failed to allocate memory for rsp.\n");
4367 goto fail_rsp;
4368 }
4369 (*rsp)->hw = ha;
4370 (*rsp)->length = rsp_len;
4371 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
4372 ((*rsp)->length + 1) * sizeof(response_t),
4373 &(*rsp)->dma, GFP_KERNEL);
4374 if (!(*rsp)->ring) {
4375 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
4376 "Failed to allocate memory for rsp_ring.\n");
4377 goto fail_rsp_ring;
4378 }
4379 (*req)->rsp = *rsp;
4380 (*rsp)->req = *req;
4381 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
4382 "req=%p req->length=%d req->ring=%p rsp=%p "
4383 "rsp->length=%d rsp->ring=%p.\n",
4384 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
4385 (*rsp)->ring);
4386 /* Allocate memory for NVRAM data for vports */
4387 if (ha->nvram_npiv_size) {
4388 ha->npiv_info = kcalloc(ha->nvram_npiv_size,
4389 sizeof(struct qla_npiv_entry),
4390 GFP_KERNEL);
4391 if (!ha->npiv_info) {
4392 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
4393 "Failed to allocate memory for npiv_info.\n");
4394 goto fail_npiv_info;
4395 }
4396 } else
4397 ha->npiv_info = NULL;
4398
4399 /* Get consistent memory allocated for EX-INIT-CB. */
4400 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
4401 IS_QLA28XX(ha)) {
4402 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4403 &ha->ex_init_cb_dma);
4404 if (!ha->ex_init_cb)
4405 goto fail_ex_init_cb;
4406 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
4407 "ex_init_cb=%p.\n", ha->ex_init_cb);
4408 }
4409
4410 /* Get consistent memory allocated for Special Features-CB. */
4411 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4412 ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
4413 &ha->sf_init_cb_dma);
4414 if (!ha->sf_init_cb)
4415 goto fail_sf_init_cb;
4416 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
4417 "sf_init_cb=%p.\n", ha->sf_init_cb);
4418 }
4419
4420
4421 /* Get consistent memory allocated for Async Port-Database. */
4422 if (!IS_FWI2_CAPABLE(ha)) {
4423 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4424 &ha->async_pd_dma);
4425 if (!ha->async_pd)
4426 goto fail_async_pd;
4427 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
4428 "async_pd=%p.\n", ha->async_pd);
4429 }
4430
4431 INIT_LIST_HEAD(&ha->vp_list);
4432
4433 /* Allocate memory for our loop_id bitmap */
4434 ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
4435 sizeof(long),
4436 GFP_KERNEL);
4437 if (!ha->loop_id_map)
4438 goto fail_loop_id_map;
4439 else {
4440 qla2x00_set_reserved_loop_ids(ha);
4441 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
4442 "loop_id_map=%p.\n", ha->loop_id_map);
4443 }
4444
4445 ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
4446 SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
4447 if (!ha->sfp_data) {
4448 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4449 "Unable to allocate memory for SFP read-data.\n");
4450 goto fail_sfp_data;
4451 }
4452
4453 ha->flt = dma_alloc_coherent(&ha->pdev->dev,
4454 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
4455 GFP_KERNEL);
4456 if (!ha->flt) {
4457 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4458 "Unable to allocate memory for FLT.\n");
4459 goto fail_flt_buffer;
4460 }
4461
4462 /* allocate the purex dma pool */
4463 ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4464 ELS_MAX_PAYLOAD, 8, 0);
4465
4466 if (!ha->purex_dma_pool) {
4467 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4468 "Unable to allocate purex_dma_pool.\n");
4469 goto fail_flt;
4470 }
4471
4472 ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
4473 ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
4474 ha->elsrej.size,
4475 &ha->elsrej.cdma,
4476 GFP_KERNEL);
4477 if (!ha->elsrej.c) {
4478 ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
4479 "Alloc failed for els reject cmd.\n");
4480 goto fail_elsrej;
4481 }
4482 ha->elsrej.c->er_cmd = ELS_LS_RJT;
4483 ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
4484 ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
4485
4486 ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt);
4487 ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size,
4488 &ha->lsrjt.cdma, GFP_KERNEL);
4489 if (!ha->lsrjt.c) {
4490 ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
4491 "Alloc failed for nvme fc reject cmd.\n");
4492 goto fail_lsrjt;
4493 }
4494
4495 return 0;
4496
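/* Failure unwind: release everything allocated above, newest first. */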
4497 fail_lsrjt:
4498 dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
4499 ha->elsrej.c, ha->elsrej.cdma);
4500 fail_elsrej:
4501 dma_pool_destroy(ha->purex_dma_pool);
4502 fail_flt:
4503 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
4504 ha->flt, ha->flt_dma);
4505
4506 fail_flt_buffer:
4507 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
4508 ha->sfp_data, ha->sfp_data_dma);
4509 fail_sfp_data:
4510 kfree(ha->loop_id_map);
4511 fail_loop_id_map:
4512 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4513 fail_async_pd:
4514 dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
4515 fail_sf_init_cb:
4516 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
4517 fail_ex_init_cb:
4518 kfree(ha->npiv_info);
4519 fail_npiv_info:
4520 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
4521 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
4522 (*rsp)->ring = NULL;
4523 (*rsp)->dma = 0;
4524 fail_rsp_ring:
4525 kfree(*rsp);
4526 *rsp = NULL;
4527 fail_rsp:
4528 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
4529 sizeof(request_t), (*req)->ring, (*req)->dma);
4530 (*req)->ring = NULL;
4531 (*req)->dma = 0;
4532 fail_req_ring:
4533 kfree(*req);
4534 *req = NULL;
4535 fail_req:
4536 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
4537 ha->ct_sns, ha->ct_sns_dma);
4538 ha->ct_sns = NULL;
4539 ha->ct_sns_dma = 0;
4540 fail_free_ms_iocb:
4541 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4542 ha->ms_iocb = NULL;
4543 ha->ms_iocb_dma = 0;
4544
4545 if (ha->sns_cmd)
4546 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
4547 ha->sns_cmd, ha->sns_cmd_dma);
4548 fail_dma_pool:
4549 if (ql2xenabledif) {
4550 struct dsd_dma *dsd, *nxt;
4551
4552 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4553 list) {
4554 list_del(&dsd->list);
4555 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4556 dsd->dsd_list_dma);
4557 ha->dif_bundle_dma_allocs--;
4558 kfree(dsd);
4559 ha->dif_bundle_kallocs--;
4560 ha->pool.unusable.count--;
4561 }
4562 dma_pool_destroy(ha->dif_bundl_pool);
4563 ha->dif_bundl_pool = NULL;
4564 }
4565
4566 fail_dif_bundl_dma_pool:
4567 if (IS_QLA82XX(ha) || ql2xenabledif) {
4568 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4569 ha->fcp_cmnd_dma_pool = NULL;
4570 }
4571 fail_dl_dma_pool:
4572 if (IS_QLA82XX(ha) || ql2xenabledif) {
4573 dma_pool_destroy(ha->dl_dma_pool);
4574 ha->dl_dma_pool = NULL;
4575 }
4576 fail_s_dma_pool:
4577 dma_pool_destroy(ha->s_dma_pool);
4578 ha->s_dma_pool = NULL;
4579 fail_free_nvram:
4580 kfree(ha->nvram);
4581 ha->nvram = NULL;
4582 fail_free_ctx_mempool:
4583 mempool_destroy(ha->ctx_mempool);
4584 ha->ctx_mempool = NULL;
4585 fail_free_srb_mempool:
4586 mempool_destroy(ha->srb_mempool);
4587 ha->srb_mempool = NULL;
4588 fail_free_gid_list:
4589 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
4590 ha->gid_list,
4591 ha->gid_list_dma);
4592 ha->gid_list = NULL;
4593 ha->gid_list_dma = 0;
4594 fail_free_tgt_mem:
4595 qlt_mem_free(ha);
4596 fail_free_btree:
4597 btree_destroy32(&ha->host_map);
4598 fail_free_init_cb:
4599 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
4600 ha->init_cb_dma);
4601 ha->init_cb = NULL;
4602 ha->init_cb_dma = 0;
4603 fail_free_vp_map:
4604 kfree(ha->vp_map);
4605 ha->vp_map = NULL;
4606 fail:
4607 ql_log(ql_log_fatal, NULL, 0x0030,
4608 "Memory allocation failure.\n");
4609 return -ENOMEM;
4610 }
4611
4612 int
4613 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
4614 {
4615 int rval;
4616 uint16_t size, max_cnt;
4617 uint32_t temp;
4618 struct qla_hw_data *ha = vha->hw;
4619
4620 	/* Return if we don't need to allocate any extended logins */
4621 if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
4622 return QLA_SUCCESS;
4623
4624 if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
4625 return QLA_SUCCESS;
4626
4627 ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
4628 max_cnt = 0;
4629 rval = qla_get_exlogin_status(vha, &size, &max_cnt);
4630 if (rval != QLA_SUCCESS) {
4631 ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
4632 "Failed to get exlogin status.\n");
4633 return rval;
4634 }
4635
4636 temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
4637 temp *= size;
4638
4639 if (temp != ha->exlogin_size) {
4640 qla2x00_free_exlogin_buffer(ha);
4641 ha->exlogin_size = temp;
4642
4643 ql_log(ql_log_info, vha, 0xd024,
4644 "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
4645 max_cnt, size, temp);
4646
4647 ql_log(ql_log_info, vha, 0xd025,
4648 "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
4649
4650 /* Get consistent memory for extended logins */
4651 ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
4652 ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
4653 if (!ha->exlogin_buf) {
4654 ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
4655 "Failed to allocate memory for exlogin_buf_dma.\n");
4656 return -ENOMEM;
4657 }
4658 }
4659
4660 /* Now configure the dma buffer */
4661 rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
4662 if (rval) {
4663 ql_log(ql_log_fatal, vha, 0xd033,
4664 "Setup extended login buffer ****FAILED****.\n");
4665 qla2x00_free_exlogin_buffer(ha);
4666 }
4667
4668 return rval;
4669 }
4670
4671 /*
4672 * qla2x00_free_exlogin_buffer
4673 *
4674 * Input:
4675 * ha = adapter block pointer
4676 */
4677 void
4678 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
4679 {
4680 if (ha->exlogin_buf) {
4681 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
4682 ha->exlogin_buf, ha->exlogin_buf_dma);
4683 ha->exlogin_buf = NULL;
4684 ha->exlogin_size = 0;
4685 }
4686 }
4687
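/*
 * Derive the firmware exchange count from the configured initiator,
 * target or dual-mode limits, capped at the adapter maximum.
 */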
4688 static void
4689 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
4690 {
4691 u32 temp;
4692 struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
4693 *ret_cnt = FW_DEF_EXCHANGES_CNT;
4694
4695 if (max_cnt > vha->hw->max_exchg)
4696 max_cnt = vha->hw->max_exchg;
4697
4698 if (qla_ini_mode_enabled(vha)) {
4699 if (vha->ql2xiniexchg > max_cnt)
4700 vha->ql2xiniexchg = max_cnt;
4701
4702 if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
4703 *ret_cnt = vha->ql2xiniexchg;
4704
4705 } else if (qla_tgt_mode_enabled(vha)) {
4706 if (vha->ql2xexchoffld > max_cnt) {
4707 vha->ql2xexchoffld = max_cnt;
4708 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4709 }
4710
4711 if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
4712 *ret_cnt = vha->ql2xexchoffld;
4713 } else if (qla_dual_mode_enabled(vha)) {
4714 temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
4715 if (temp > max_cnt) {
4716 vha->ql2xiniexchg -= (temp - max_cnt)/2;
4717 vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
4718 temp = max_cnt;
4719 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4720 }
4721
4722 if (temp > FW_DEF_EXCHANGES_CNT)
4723 *ret_cnt = temp;
4724 }
4725 }
4726
4727 int
4728 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4729 {
4730 int rval;
4731 u16 size, max_cnt;
4732 u32 actual_cnt, totsz;
4733 struct qla_hw_data *ha = vha->hw;
4734
4735 if (!ha->flags.exchoffld_enabled)
4736 return QLA_SUCCESS;
4737
4738 if (!IS_EXCHG_OFFLD_CAPABLE(ha))
4739 return QLA_SUCCESS;
4740
4741 max_cnt = 0;
4742 rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
4743 if (rval != QLA_SUCCESS) {
4744 ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
4745 		    "Failed to get exchange offload status.\n");
4746 return rval;
4747 }
4748
4749 qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
4750 ql_log(ql_log_info, vha, 0xd014,
4751 "Actual exchange offload count: %d.\n", actual_cnt);
4752
4753 totsz = actual_cnt * size;
4754
4755 if (totsz != ha->exchoffld_size) {
4756 qla2x00_free_exchoffld_buffer(ha);
4757 if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
4758 ha->exchoffld_size = 0;
4759 ha->flags.exchoffld_enabled = 0;
4760 return QLA_SUCCESS;
4761 }
4762
4763 ha->exchoffld_size = totsz;
4764
4765 ql_log(ql_log_info, vha, 0xd016,
4766 "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
4767 max_cnt, actual_cnt, size, totsz);
4768
4769 ql_log(ql_log_info, vha, 0xd017,
4770 "Exchange Buffers requested size = 0x%x\n",
4771 ha->exchoffld_size);
4772
4773 		/* Get consistent memory for exchange offload */
4774 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
4775 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
4776 if (!ha->exchoffld_buf) {
4777 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4778 "Failed to allocate memory for Exchange Offload.\n");
4779
4780 if (ha->max_exchg >
4781 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
4782 ha->max_exchg -= REDUCE_EXCHANGES_CNT;
4783 } else if (ha->max_exchg >
4784 (FW_DEF_EXCHANGES_CNT + 512)) {
4785 ha->max_exchg -= 512;
4786 } else {
4787 ha->flags.exchoffld_enabled = 0;
4788 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4789 "Disabling Exchange offload due to lack of memory\n");
4790 }
4791 ha->exchoffld_size = 0;
4792
4793 return -ENOMEM;
4794 }
4795 } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
4796 /* pathological case */
4797 qla2x00_free_exchoffld_buffer(ha);
4798 ha->exchoffld_size = 0;
4799 ha->flags.exchoffld_enabled = 0;
4800 ql_log(ql_log_info, vha, 0xd016,
4801 		    "Exchange offload not enabled: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
4802 ha->exchoffld_size, actual_cnt, size, totsz);
4803 return 0;
4804 }
4805
4806 /* Now configure the dma buffer */
4807 rval = qla_set_exchoffld_mem_cfg(vha);
4808 if (rval) {
4809 ql_log(ql_log_fatal, vha, 0xd02e,
4810 "Setup exchange offload buffer ****FAILED****.\n");
4811 qla2x00_free_exchoffld_buffer(ha);
4812 } else {
4813 /* re-adjust number of target exchange */
4814 struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
4815
4816 if (qla_ini_mode_enabled(vha))
4817 icb->exchange_count = 0;
4818 else
4819 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4820 }
4821
4822 return rval;
4823 }
4824
4825 /*
4826 * qla2x00_free_exchoffld_buffer
4827 *
4828 * Input:
4829 * ha = adapter block pointer
4830 */
4831 void
4832 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
4833 {
4834 if (ha->exchoffld_buf) {
4835 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
4836 ha->exchoffld_buf, ha->exchoffld_buf_dma);
4837 ha->exchoffld_buf = NULL;
4838 ha->exchoffld_size = 0;
4839 }
4840 }
4841
4842 /*
4843 * qla2x00_free_fw_dump
4844 * Frees fw dump stuff.
4845 *
4846 * Input:
4847 * ha = adapter block pointer
4848 */
4849 static void
4850 qla2x00_free_fw_dump(struct qla_hw_data *ha)
4851 {
4852 struct fwdt *fwdt = ha->fwdt;
4853 uint j;
4854
4855 if (ha->fce)
4856 dma_free_coherent(&ha->pdev->dev,
4857 FCE_SIZE, ha->fce, ha->fce_dma);
4858
4859 if (ha->eft)
4860 dma_free_coherent(&ha->pdev->dev,
4861 EFT_SIZE, ha->eft, ha->eft_dma);
4862
4863 vfree(ha->fw_dump);
4864
4865 ha->fce = NULL;
4866 ha->fce_dma = 0;
4867 ha->flags.fce_enabled = 0;
4868 ha->eft = NULL;
4869 ha->eft_dma = 0;
4870 ha->fw_dumped = false;
4871 ha->fw_dump_cap_flags = 0;
4872 ha->fw_dump_reading = 0;
4873 ha->fw_dump = NULL;
4874 ha->fw_dump_len = 0;
4875
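	/* Release both firmware dump templates. */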
4876 for (j = 0; j < 2; j++, fwdt++) {
4877 vfree(fwdt->template);
4878 fwdt->template = NULL;
4879 fwdt->length = 0;
4880 }
4881 }
4882
4883 /*
4884 * qla2x00_mem_free
4885 * Frees all adapter allocated memory.
4886 *
4887 * Input:
4888 * ha = adapter block pointer.
4889 */
4890 static void
4891 qla2x00_mem_free(struct qla_hw_data *ha)
4892 {
4893 qla2x00_free_fw_dump(ha);
4894
4895 if (ha->mctp_dump)
4896 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
4897 ha->mctp_dump_dma);
4898 ha->mctp_dump = NULL;
4899
4900 mempool_destroy(ha->srb_mempool);
4901 ha->srb_mempool = NULL;
4902
4903 if (ha->dcbx_tlv)
4904 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
4905 ha->dcbx_tlv, ha->dcbx_tlv_dma);
4906 ha->dcbx_tlv = NULL;
4907
4908 if (ha->xgmac_data)
4909 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
4910 ha->xgmac_data, ha->xgmac_data_dma);
4911 ha->xgmac_data = NULL;
4912
4913 if (ha->sns_cmd)
4914 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
4915 ha->sns_cmd, ha->sns_cmd_dma);
4916 ha->sns_cmd = NULL;
4917 ha->sns_cmd_dma = 0;
4918
4919 if (ha->ct_sns)
4920 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
4921 ha->ct_sns, ha->ct_sns_dma);
4922 ha->ct_sns = NULL;
4923 ha->ct_sns_dma = 0;
4924
4925 if (ha->sfp_data)
4926 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
4927 ha->sfp_data_dma);
4928 ha->sfp_data = NULL;
4929
4930 if (ha->flt)
4931 dma_free_coherent(&ha->pdev->dev,
4932 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
4933 ha->flt, ha->flt_dma);
4934 ha->flt = NULL;
4935 ha->flt_dma = 0;
4936
4937 if (ha->ms_iocb)
4938 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4939 ha->ms_iocb = NULL;
4940 ha->ms_iocb_dma = 0;
4941
4942 if (ha->sf_init_cb)
4943 dma_pool_free(ha->s_dma_pool,
4944 ha->sf_init_cb, ha->sf_init_cb_dma);
4945
4946 if (ha->ex_init_cb)
4947 dma_pool_free(ha->s_dma_pool,
4948 ha->ex_init_cb, ha->ex_init_cb_dma);
4949 ha->ex_init_cb = NULL;
4950 ha->ex_init_cb_dma = 0;
4951
4952 if (ha->async_pd)
4953 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4954 ha->async_pd = NULL;
4955 ha->async_pd_dma = 0;
4956
4957 dma_pool_destroy(ha->s_dma_pool);
4958 ha->s_dma_pool = NULL;
4959
4960 if (ha->gid_list)
4961 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
4962 ha->gid_list, ha->gid_list_dma);
4963 ha->gid_list = NULL;
4964 ha->gid_list_dma = 0;
4965
4966 if (ha->base_qpair && !list_empty(&ha->base_qpair->dsd_list)) {
4967 struct dsd_dma *dsd_ptr, *tdsd_ptr;
4968
4969 /* clean up allocated prev pool */
4970 list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
4971 &ha->base_qpair->dsd_list, list) {
4972 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
4973 dsd_ptr->dsd_list_dma);
4974 list_del(&dsd_ptr->list);
4975 kfree(dsd_ptr);
4976 }
4977 }
4978
4979 dma_pool_destroy(ha->dl_dma_pool);
4980 ha->dl_dma_pool = NULL;
4981
4982 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4983 ha->fcp_cmnd_dma_pool = NULL;
4984
4985 mempool_destroy(ha->ctx_mempool);
4986 ha->ctx_mempool = NULL;
4987
4988 if (ql2xenabledif && ha->dif_bundl_pool) {
4989 struct dsd_dma *dsd, *nxt;
4990
4991 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4992 list) {
4993 list_del(&dsd->list);
4994 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4995 dsd->dsd_list_dma);
4996 ha->dif_bundle_dma_allocs--;
4997 kfree(dsd);
4998 ha->dif_bundle_kallocs--;
4999 ha->pool.unusable.count--;
5000 }
5001 list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
5002 list_del(&dsd->list);
5003 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
5004 dsd->dsd_list_dma);
5005 ha->dif_bundle_dma_allocs--;
5006 kfree(dsd);
5007 ha->dif_bundle_kallocs--;
5008 }
5009 }
5010
5011 dma_pool_destroy(ha->dif_bundl_pool);
5012 ha->dif_bundl_pool = NULL;
5013
5014 qlt_mem_free(ha);
5015 qla_remove_hostmap(ha);
5016
5017 if (ha->init_cb)
5018 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
5019 ha->init_cb, ha->init_cb_dma);
5020
5021 dma_pool_destroy(ha->purex_dma_pool);
5022 ha->purex_dma_pool = NULL;
5023
5024 if (ha->elsrej.c) {
5025 dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
5026 ha->elsrej.c, ha->elsrej.cdma);
5027 ha->elsrej.c = NULL;
5028 }
5029
5030 if (ha->lsrjt.c) {
5031 dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c,
5032 ha->lsrjt.cdma);
5033 ha->lsrjt.c = NULL;
5034 }
5035
5036 ha->init_cb = NULL;
5037 ha->init_cb_dma = 0;
5038
5039 vfree(ha->optrom_buffer);
5040 ha->optrom_buffer = NULL;
5041 kfree(ha->nvram);
5042 ha->nvram = NULL;
5043 kfree(ha->npiv_info);
5044 ha->npiv_info = NULL;
5045 kfree(ha->swl);
5046 ha->swl = NULL;
5047 kfree(ha->loop_id_map);
5048 ha->sf_init_cb = NULL;
5049 ha->sf_init_cb_dma = 0;
5050 ha->loop_id_map = NULL;
5051
5052 kfree(ha->vp_map);
5053 ha->vp_map = NULL;
5054 }
5055
5056 struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *sht,
5057 struct qla_hw_data *ha)
5058 {
5059 struct Scsi_Host *host;
5060 struct scsi_qla_host *vha = NULL;
5061
5062 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
5063 if (!host) {
5064 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
5065 "Failed to allocate host from the scsi layer, aborting.\n");
5066 return NULL;
5067 }
5068
5069 /* Clear our data area */
5070 vha = shost_priv(host);
5071 memset(vha, 0, sizeof(scsi_qla_host_t));
5072
5073 vha->host = host;
5074 vha->host_no = host->host_no;
5075 vha->hw = ha;
5076
5077 vha->qlini_mode = ql2x_ini_mode;
5078 vha->ql2xexchoffld = ql2xexchoffld;
5079 vha->ql2xiniexchg = ql2xiniexchg;
5080
5081 INIT_LIST_HEAD(&vha->vp_fcports);
5082 INIT_LIST_HEAD(&vha->work_list);
5083 INIT_LIST_HEAD(&vha->list);
5084 INIT_LIST_HEAD(&vha->qla_cmd_list);
5085 INIT_LIST_HEAD(&vha->logo_list);
5086 INIT_LIST_HEAD(&vha->plogi_ack_list);
5087 INIT_LIST_HEAD(&vha->qp_list);
5088 INIT_LIST_HEAD(&vha->gnl.fcports);
5089 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
5090
5091 INIT_LIST_HEAD(&vha->purex_list.head);
5092 spin_lock_init(&vha->purex_list.lock);
5093
5094 spin_lock_init(&vha->work_lock);
5095 spin_lock_init(&vha->cmd_list_lock);
5096 init_waitqueue_head(&vha->fcport_waitQ);
5097 init_waitqueue_head(&vha->vref_waitq);
5098 qla_enode_init(vha);
5099 qla_edb_init(vha);
5100
5101
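	/* DMA buffer for the extended Get Name List: one entry per possible loop ID. */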
5102 vha->gnl.size = sizeof(struct get_name_list_extended) *
5103 (ha->max_loop_id + 1);
5104 vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
5105 vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
5106 if (!vha->gnl.l) {
5107 ql_log(ql_log_fatal, vha, 0xd04a,
5108 "Alloc failed for name list.\n");
5109 scsi_host_put(vha->host);
5110 return NULL;
5111 }
5112
5113 /* todo: what about ext login? */
5114 vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
5115 vha->scan.l = vmalloc(vha->scan.size);
5116 if (!vha->scan.l) {
5117 ql_log(ql_log_fatal, vha, 0xd04a,
5118 "Alloc failed for scan database.\n");
5119 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
5120 vha->gnl.l, vha->gnl.ldma);
5121 vha->gnl.l = NULL;
5122 scsi_host_put(vha->host);
5123 return NULL;
5124 }
5125 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
5126
5127 snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu",
5128 QLA2XXX_DRIVER_NAME, vha->host_no);
5129 ql_dbg(ql_dbg_init, vha, 0x0041,
5130 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
5131 vha->host, vha->hw, vha,
5132 dev_name(&(ha->pdev->dev)));
5133
5134 return vha;
5135 }
5136
5137 struct qla_work_evt *
5138 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
5139 {
5140 struct qla_work_evt *e;
5141
5142 if (test_bit(UNLOADING, &vha->dpc_flags))
5143 return NULL;
5144
5145 if (qla_vha_mark_busy(vha))
5146 return NULL;
5147
5148 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
5149 if (!e) {
5150 QLA_VHA_MARK_NOT_BUSY(vha);
5151 return NULL;
5152 }
5153
5154 INIT_LIST_HEAD(&e->list);
5155 e->type = type;
5156 e->flags = QLA_EVT_FLAG_FREE;
5157 return e;
5158 }
5159
5160 int
5161 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
5162 {
5163 unsigned long flags;
5164 bool q = false;
5165
5166 spin_lock_irqsave(&vha->work_lock, flags);
5167 list_add_tail(&e->list, &vha->work_list);
5168
5169 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
5170 q = true;
5171
5172 spin_unlock_irqrestore(&vha->work_lock, flags);
5173
5174 if (q)
5175 queue_work(vha->hw->wq, &vha->iocb_work);
5176
5177 return QLA_SUCCESS;
5178 }
5179
5180 int
5181 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
5182 u32 data)
5183 {
5184 struct qla_work_evt *e;
5185
5186 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
5187 if (!e)
5188 return QLA_FUNCTION_FAILED;
5189
5190 e->u.aen.code = code;
5191 e->u.aen.data = data;
5192 return qla2x00_post_work(vha, e);
5193 }
5194
5195 int
5196 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
5197 {
5198 struct qla_work_evt *e;
5199
5200 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
5201 if (!e)
5202 return QLA_FUNCTION_FAILED;
5203
5204 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
5205 return qla2x00_post_work(vha, e);
5206 }
5207
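/*
 * Generate qla2x00_post_async_<name>_work() helpers that queue the
 * corresponding async login/logout events onto the work list.
 */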
5208 #define qla2x00_post_async_work(name, type) \
5209 int qla2x00_post_async_##name##_work( \
5210 struct scsi_qla_host *vha, \
5211 fc_port_t *fcport, uint16_t *data) \
5212 { \
5213 struct qla_work_evt *e; \
5214 \
5215 e = qla2x00_alloc_work(vha, type); \
5216 if (!e) \
5217 return QLA_FUNCTION_FAILED; \
5218 \
5219 e->u.logio.fcport = fcport; \
5220 if (data) { \
5221 e->u.logio.data[0] = data[0]; \
5222 e->u.logio.data[1] = data[1]; \
5223 } \
5224 fcport->flags |= FCF_ASYNC_ACTIVE; \
5225 return qla2x00_post_work(vha, e); \
5226 }
5227
5228 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
5229 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
5230 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
5231 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
5232 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
5233
5234 int
5235 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
5236 {
5237 struct qla_work_evt *e;
5238
5239 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
5240 if (!e)
5241 return QLA_FUNCTION_FAILED;
5242
5243 e->u.uevent.code = code;
5244 return qla2x00_post_work(vha, e);
5245 }
5246
5247 static void
5248 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
5249 {
5250 char event_string[40];
5251 char *envp[] = { event_string, NULL };
5252
5253 switch (code) {
5254 case QLA_UEVENT_CODE_FW_DUMP:
5255 snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
5256 vha->host_no);
5257 break;
5258 default:
5259 /* do nothing */
5260 break;
5261 }
5262 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
5263 }
5264
5265 int
5266 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
5267 uint32_t *data, int cnt)
5268 {
5269 struct qla_work_evt *e;
5270
5271 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
5272 if (!e)
5273 return QLA_FUNCTION_FAILED;
5274
5275 e->u.aenfx.evtcode = evtcode;
5276 e->u.aenfx.count = cnt;
5277 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
5278 return qla2x00_post_work(vha, e);
5279 }
5280
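/* Schedule deferred fcport registration via reg_work unless an update is already pending. */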
5281 void qla24xx_sched_upd_fcport(fc_port_t *fcport)
5282 {
5283 unsigned long flags;
5284
5285 if (IS_SW_RESV_ADDR(fcport->d_id))
5286 return;
5287
5288 spin_lock_irqsave(&fcport->vha->work_lock, flags);
5289 if (fcport->disc_state == DSC_UPD_FCPORT) {
5290 spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
5291 return;
5292 }
5293 fcport->jiffies_at_registration = jiffies;
5294 fcport->sec_since_registration = 0;
5295 fcport->next_disc_state = DSC_DELETED;
5296 qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
5297 spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
5298
5299 queue_work(system_unbound_wq, &fcport->reg_work);
5300 }
5301
5302 static
5303 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5304 {
5305 unsigned long flags;
5306 fc_port_t *fcport = NULL, *tfcp;
5307 struct qlt_plogi_ack_t *pla =
5308 (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
5309 uint8_t free_fcport = 0;
5310
5311 ql_dbg(ql_dbg_disc, vha, 0xffff,
5312 "%s %d %8phC enter\n",
5313 __func__, __LINE__, e->u.new_sess.port_name);
5314
5315 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5316 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
5317 if (fcport) {
5318 fcport->d_id = e->u.new_sess.id;
5319 if (pla) {
5320 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
5321 memcpy(fcport->node_name,
5322 pla->iocb.u.isp24.u.plogi.node_name,
5323 WWN_SIZE);
5324 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
5325 /* we took an extra ref_count to prevent PLOGI ACK when
5326 * fcport/sess has not been created.
5327 */
5328 pla->ref_count--;
5329 }
5330 } else {
5331 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5332 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5333 if (fcport) {
5334 fcport->d_id = e->u.new_sess.id;
5335 fcport->flags |= FCF_FABRIC_DEVICE;
5336 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
5337 fcport->tgt_short_link_down_cnt = 0;
5338
5339 memcpy(fcport->port_name, e->u.new_sess.port_name,
5340 WWN_SIZE);
5341
5342 fcport->fc4_type = e->u.new_sess.fc4_type;
5343 if (NVME_PRIORITY(vha->hw, fcport))
5344 fcport->do_prli_nvme = 1;
5345 else
5346 fcport->do_prli_nvme = 0;
5347
5348 if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
5349 fcport->dm_login_expire = jiffies +
5350 QLA_N2N_WAIT_TIME * HZ;
5351 fcport->fc4_type = FS_FC4TYPE_FCP;
5352 fcport->n2n_flag = 1;
5353 if (vha->flags.nvme_enabled)
5354 fcport->fc4_type |= FS_FC4TYPE_NVME;
5355 }
5356
5357 } else {
5358 ql_dbg(ql_dbg_disc, vha, 0xffff,
5359 "%s %8phC mem alloc fail.\n",
5360 __func__, e->u.new_sess.port_name);
5361
5362 if (pla) {
5363 list_del(&pla->list);
5364 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5365 }
5366 return;
5367 }
5368
5369 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5370 /* search again to make sure no one else got ahead */
5371 tfcp = qla2x00_find_fcport_by_wwpn(vha,
5372 e->u.new_sess.port_name, 1);
5373 if (tfcp) {
5374 /* should rarely happen */
5375 ql_dbg(ql_dbg_disc, vha, 0xffff,
5376 "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
5377 __func__, tfcp->port_name, tfcp->disc_state,
5378 tfcp->fw_login_state);
5379
5380 free_fcport = 1;
5381 } else {
5382 list_add_tail(&fcport->list, &vha->vp_fcports);
5383
5384 }
5385 if (pla) {
5386 qlt_plogi_ack_link(vha, pla, fcport,
5387 QLT_PLOGI_LINK_SAME_WWN);
5388 pla->ref_count--;
5389 }
5390 }
5391 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5392
5393 if (fcport) {
5394 fcport->id_changed = 1;
5395 fcport->scan_state = QLA_FCPORT_FOUND;
5396 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5397 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
5398
5399 if (pla) {
5400 if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
5401 u16 wd3_lo;
5402
5403 fcport->fw_login_state = DSC_LS_PRLI_PEND;
5404 fcport->local = 0;
5405 fcport->loop_id =
5406 le16_to_cpu(
5407 pla->iocb.u.isp24.nport_handle);
5408 fcport->fw_login_state = DSC_LS_PRLI_PEND;
5409 wd3_lo =
5410 le16_to_cpu(
5411 pla->iocb.u.isp24.u.prli.wd3_lo);
5412
5413 if (wd3_lo & BIT_7)
5414 fcport->conf_compl_supported = 1;
5415
5416 if ((wd3_lo & BIT_4) == 0)
5417 fcport->port_type = FCT_INITIATOR;
5418 else
5419 fcport->port_type = FCT_TARGET;
5420 }
5421 qlt_plogi_ack_unref(vha, pla);
5422 } else {
5423 fc_port_t *dfcp = NULL;
5424
5425 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5426 tfcp = qla2x00_find_fcport_by_nportid(vha,
5427 &e->u.new_sess.id, 1);
5428 if (tfcp && (tfcp != fcport)) {
5429 /*
5430 * We have a conflicting fcport with the same NportID.
5431 */
5432 ql_dbg(ql_dbg_disc, vha, 0xffff,
5433 "%s %8phC found conflict b4 add. DS %d LS %d\n",
5434 __func__, tfcp->port_name, tfcp->disc_state,
5435 tfcp->fw_login_state);
5436
5437 switch (tfcp->disc_state) {
5438 case DSC_DELETED:
5439 break;
5440 case DSC_DELETE_PEND:
5441 fcport->login_pause = 1;
5442 tfcp->conflict = fcport;
5443 break;
5444 default:
5445 fcport->login_pause = 1;
5446 tfcp->conflict = fcport;
5447 dfcp = tfcp;
5448 break;
5449 }
5450 }
5451 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5452 if (dfcp)
5453 qlt_schedule_sess_for_deletion(tfcp);
5454
5455 if (N2N_TOPO(vha->hw)) {
5456 fcport->flags &= ~FCF_FABRIC_DEVICE;
5457 fcport->keep_nport_handle = 1;
5458 if (vha->flags.nvme_enabled) {
5459 fcport->fc4_type =
5460 (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
5461 fcport->n2n_flag = 1;
5462 }
5463 fcport->fw_login_state = 0;
5464
5465 schedule_delayed_work(&vha->scan.scan_work, 5);
5466 } else {
5467 qla24xx_fcport_handle_login(vha, fcport);
5468 }
5469 }
5470 }
5471
5472 if (free_fcport) {
5473 qla2x00_free_fcport(fcport);
5474 if (pla) {
5475 list_del(&pla->list);
5476 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5477 }
5478 }
5479 }
5480
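/* QLA_EVT_SP_RETRY handler: re-issue a deferred SRB; unmap it on failure. */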
5481 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
5482 {
5483 struct srb *sp = e->u.iosb.sp;
5484 int rval;
5485
5486 rval = qla2x00_start_sp(sp);
5487 if (rval != QLA_SUCCESS) {
5488 ql_dbg(ql_dbg_disc, vha, 0x2043,
5489 "%s: %s: Re-issue IOCB failed (%d).\n",
5490 __func__, sp->name, rval);
5491 qla24xx_sp_unmap(vha, sp);
5492 }
5493 }
5494
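/*
 * Drain the vha work list: splice it under work_lock and dispatch each
 * event. EAGAIN from a handler puts the remaining work back at the head
 * of the list and stops processing.
 */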
5495 void
5496 qla2x00_do_work(struct scsi_qla_host *vha)
5497 {
5498 struct qla_work_evt *e, *tmp;
5499 unsigned long flags;
5500 LIST_HEAD(work);
5501 int rc;
5502
5503 spin_lock_irqsave(&vha->work_lock, flags);
5504 list_splice_init(&vha->work_list, &work);
5505 spin_unlock_irqrestore(&vha->work_lock, flags);
5506
5507 list_for_each_entry_safe(e, tmp, &work, list) {
5508 rc = QLA_SUCCESS;
5509 switch (e->type) {
5510 case QLA_EVT_AEN:
5511 fc_host_post_event(vha->host, fc_get_event_number(),
5512 e->u.aen.code, e->u.aen.data);
5513 break;
5514 case QLA_EVT_IDC_ACK:
5515 qla81xx_idc_ack(vha, e->u.idc_ack.mb);
5516 break;
5517 case QLA_EVT_ASYNC_LOGIN:
5518 qla2x00_async_login(vha, e->u.logio.fcport,
5519 e->u.logio.data);
5520 break;
5521 case QLA_EVT_ASYNC_LOGOUT:
5522 rc = qla2x00_async_logout(vha, e->u.logio.fcport);
5523 break;
5524 case QLA_EVT_ASYNC_ADISC:
5525 qla2x00_async_adisc(vha, e->u.logio.fcport,
5526 e->u.logio.data);
5527 break;
5528 case QLA_EVT_UEVENT:
5529 qla2x00_uevent_emit(vha, e->u.uevent.code);
5530 break;
5531 case QLA_EVT_AENFX:
5532 qlafx00_process_aen(vha, e);
5533 break;
5534 case QLA_EVT_UNMAP:
5535 qla24xx_sp_unmap(vha, e->u.iosb.sp);
5536 break;
5537 case QLA_EVT_RELOGIN:
5538 qla2x00_relogin(vha);
5539 break;
5540 case QLA_EVT_NEW_SESS:
5541 qla24xx_create_new_sess(vha, e);
5542 break;
5543 case QLA_EVT_GPDB:
5544 qla24xx_async_gpdb(vha, e->u.fcport.fcport,
5545 e->u.fcport.opt);
5546 break;
5547 case QLA_EVT_PRLI:
5548 qla24xx_async_prli(vha, e->u.fcport.fcport);
5549 break;
5550 case QLA_EVT_GPSC:
5551 qla24xx_async_gpsc(vha, e->u.fcport.fcport);
5552 break;
5553 case QLA_EVT_GNL:
5554 qla24xx_async_gnl(vha, e->u.fcport.fcport);
5555 break;
5556 case QLA_EVT_NACK:
5557 qla24xx_do_nack_work(vha, e);
5558 break;
5559 case QLA_EVT_ASYNC_PRLO:
5560 rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
5561 break;
5562 case QLA_EVT_ASYNC_PRLO_DONE:
5563 qla2x00_async_prlo_done(vha, e->u.logio.fcport,
5564 e->u.logio.data);
5565 break;
5566 case QLA_EVT_GPNFT:
5567 qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
5568 e->u.gpnft.sp);
5569 break;
5570 case QLA_EVT_GPNFT_DONE:
5571 qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
5572 break;
5573 case QLA_EVT_GNNFT_DONE:
5574 qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
5575 break;
5576 case QLA_EVT_GFPNID:
5577 qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
5578 break;
5579 case QLA_EVT_SP_RETRY:
5580 qla_sp_retry(vha, e);
5581 break;
5582 case QLA_EVT_IIDMA:
5583 qla_do_iidma_work(vha, e->u.fcport.fcport);
5584 break;
5585 case QLA_EVT_ELS_PLOGI:
5586 qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
5587 e->u.fcport.fcport);
5588 break;
5589 case QLA_EVT_SA_REPLACE:
5590 rc = qla24xx_issue_sa_replace_iocb(vha, e);
5591 break;
5592 }
5593
5594 if (rc == EAGAIN) {
5595 /* put 'work' at head of 'vha->work_list' */
5596 spin_lock_irqsave(&vha->work_lock, flags);
5597 list_splice(&work, &vha->work_list);
5598 spin_unlock_irqrestore(&vha->work_lock, flags);
5599 break;
5600 }
5601 list_del_init(&e->list);
5602 if (e->flags & QLA_EVT_FLAG_FREE)
5603 kfree(e);
5604
5605 /* For each work completed decrement vha ref count */
5606 QLA_VHA_MARK_NOT_BUSY(vha);
5607 }
5608 }
5609
5610 int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
5611 {
5612 struct qla_work_evt *e;
5613
5614 e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
5615
5616 if (!e) {
5617 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5618 return QLA_FUNCTION_FAILED;
5619 }
5620
5621 return qla2x00_post_work(vha, e);
5622 }
5623
5624 /* Relogins all the fcports of a vport
5625 * Context: dpc thread
5626 */
5627 void qla2x00_relogin(struct scsi_qla_host *vha)
5628 {
5629 fc_port_t *fcport;
5630 int status, relogin_needed = 0;
5631 struct event_arg ea;
5632
5633 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5634 /*
5635 * If the port is not ONLINE then try to login
5636 * to it if we haven't run out of retries.
5637 */
5638 if (atomic_read(&fcport->state) != FCS_ONLINE &&
5639 fcport->login_retry) {
5640 if (fcport->scan_state != QLA_FCPORT_FOUND ||
5641 fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
5642 fcport->disc_state == DSC_LOGIN_COMPLETE)
5643 continue;
5644
5645 if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
5646 fcport->disc_state == DSC_DELETE_PEND) {
5647 relogin_needed = 1;
5648 } else {
5649 if (vha->hw->current_topology != ISP_CFG_NL) {
5650 memset(&ea, 0, sizeof(ea));
5651 ea.fcport = fcport;
5652 qla24xx_handle_relogin_event(vha, &ea);
5653 } else if (vha->hw->current_topology ==
5654 ISP_CFG_NL &&
5655 IS_QLA2XXX_MIDTYPE(vha->hw)) {
5656 (void)qla24xx_fcport_handle_login(vha,
5657 fcport);
5658 } else if (vha->hw->current_topology ==
5659 ISP_CFG_NL) {
5660 fcport->login_retry--;
5661 status =
5662 qla2x00_local_device_login(vha,
5663 fcport);
5664 if (status == QLA_SUCCESS) {
5665 fcport->old_loop_id =
5666 fcport->loop_id;
5667 ql_dbg(ql_dbg_disc, vha, 0x2003,
5668 "Port login OK: logged in ID 0x%x.\n",
5669 fcport->loop_id);
5670 qla2x00_update_fcport
5671 (vha, fcport);
5672 } else if (status == 1) {
5673 set_bit(RELOGIN_NEEDED,
5674 &vha->dpc_flags);
5675 /* retry the login again */
5676 ql_dbg(ql_dbg_disc, vha, 0x2007,
5677 "Retrying %d login again loop_id 0x%x.\n",
5678 fcport->login_retry,
5679 fcport->loop_id);
5680 } else {
5681 fcport->login_retry = 0;
5682 }
5683
5684 if (fcport->login_retry == 0 &&
5685 status != QLA_SUCCESS)
5686 qla2x00_clear_loop_id(fcport);
5687 }
5688 }
5689 }
5690 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5691 break;
5692 }
5693
5694 if (relogin_needed)
5695 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5696
5697 ql_dbg(ql_dbg_disc, vha, 0x400e,
5698 "Relogin end.\n");
5699 }
5700
5701 /* Schedule work on any of the dpc-workqueues */
5702 void
5703 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
5704 {
5705 struct qla_hw_data *ha = base_vha->hw;
5706
5707 switch (work_code) {
5708 case MBA_IDC_AEN: /* 0x8200 */
5709 if (ha->dpc_lp_wq)
5710 queue_work(ha->dpc_lp_wq, &ha->idc_aen);
5711 break;
5712
5713 case QLA83XX_NIC_CORE_RESET: /* 0x1 */
5714 if (!ha->flags.nic_core_reset_hdlr_active) {
5715 if (ha->dpc_hp_wq)
5716 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
5717 } else
5718 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
5719 "NIC Core reset is already active. Skip "
5720 "scheduling it again.\n");
5721 break;
5722 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
5723 if (ha->dpc_hp_wq)
5724 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
5725 break;
5726 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
5727 if (ha->dpc_hp_wq)
5728 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
5729 break;
5730 default:
5731 ql_log(ql_log_warn, base_vha, 0xb05f,
5732 "Unknown work-code=0x%x.\n", work_code);
5733 }
5734
5735 return;
5736 }
5737
5738 /* Work: Perform NIC Core Unrecoverable state handling */
5739 void
5740 qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
5741 {
5742 struct qla_hw_data *ha =
5743 container_of(work, struct qla_hw_data, nic_core_unrecoverable);
5744 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5745 uint32_t dev_state = 0;
5746
5747 qla83xx_idc_lock(base_vha, 0);
5748 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5749 qla83xx_reset_ownership(base_vha);
5750 if (ha->flags.nic_core_reset_owner) {
5751 ha->flags.nic_core_reset_owner = 0;
5752 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
5753 QLA8XXX_DEV_FAILED);
5754 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
5755 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
5756 }
5757 qla83xx_idc_unlock(base_vha, 0);
5758 }
5759
5760 /* Work: Execute IDC state handler */
5761 void
5762 qla83xx_idc_state_handler_work(struct work_struct *work)
5763 {
5764 struct qla_hw_data *ha =
5765 container_of(work, struct qla_hw_data, idc_state_handler);
5766 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5767 uint32_t dev_state = 0;
5768
5769 qla83xx_idc_lock(base_vha, 0);
5770 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5771 if (dev_state == QLA8XXX_DEV_FAILED ||
5772 dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
5773 qla83xx_idc_state_handler(base_vha);
5774 qla83xx_idc_unlock(base_vha, 0);
5775 }
5776
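/*
 * Sample the NIC core firmware heartbeat register twice, ~100ms apart,
 * for up to 1 second. A changing counter means the firmware is alive.
 */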
5777 static int
5778 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
5779 {
5780 int rval = QLA_SUCCESS;
5781 unsigned long heart_beat_wait = jiffies + (1 * HZ);
5782 uint32_t heart_beat_counter1, heart_beat_counter2;
5783
5784 do {
5785 if (time_after(jiffies, heart_beat_wait)) {
5786 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
5787 "Nic Core f/w is not alive.\n");
5788 rval = QLA_FUNCTION_FAILED;
5789 break;
5790 }
5791
5792 qla83xx_idc_lock(base_vha, 0);
5793 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
5794 &heart_beat_counter1);
5795 qla83xx_idc_unlock(base_vha, 0);
5796 msleep(100);
5797 qla83xx_idc_lock(base_vha, 0);
5798 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
5799 &heart_beat_counter2);
5800 qla83xx_idc_unlock(base_vha, 0);
5801 } while (heart_beat_counter1 == heart_beat_counter2);
5802
5803 return rval;
5804 }
5805
5806 /* Work: Perform NIC Core Reset handling */
5807 void
5808 qla83xx_nic_core_reset_work(struct work_struct *work)
5809 {
5810 struct qla_hw_data *ha =
5811 container_of(work, struct qla_hw_data, nic_core_reset);
5812 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5813 uint32_t dev_state = 0;
5814
5815 if (IS_QLA2031(ha)) {
5816 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
5817 ql_log(ql_log_warn, base_vha, 0xb081,
5818 "Failed to dump mctp\n");
5819 return;
5820 }
5821
5822 if (!ha->flags.nic_core_reset_hdlr_active) {
5823 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
5824 qla83xx_idc_lock(base_vha, 0);
5825 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
5826 &dev_state);
5827 qla83xx_idc_unlock(base_vha, 0);
5828 if (dev_state != QLA8XXX_DEV_NEED_RESET) {
5829 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
5830 "Nic Core f/w is alive.\n");
5831 return;
5832 }
5833 }
5834
5835 ha->flags.nic_core_reset_hdlr_active = 1;
5836 if (qla83xx_nic_core_reset(base_vha)) {
5837 /* NIC Core reset failed. */
5838 ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
5839 "NIC Core reset failed.\n");
5840 }
5841 ha->flags.nic_core_reset_hdlr_active = 0;
5842 }
5843 }
5844
5845 /* Work: Handle 8200 IDC aens */
5846 void
5847 qla83xx_service_idc_aen(struct work_struct *work)
5848 {
5849 struct qla_hw_data *ha =
5850 container_of(work, struct qla_hw_data, idc_aen);
5851 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5852 uint32_t dev_state, idc_control;
5853
5854 qla83xx_idc_lock(base_vha, 0);
5855 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5856 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
5857 qla83xx_idc_unlock(base_vha, 0);
5858 if (dev_state == QLA8XXX_DEV_NEED_RESET) {
5859 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
5860 ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
5861 "Application requested NIC Core Reset.\n");
5862 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
5863 } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
5864 QLA_SUCCESS) {
5865 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
5866 "Other protocol driver requested NIC Core Reset.\n");
5867 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
5868 }
5869 } else if (dev_state == QLA8XXX_DEV_FAILED ||
5870 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
5871 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
5872 }
5873 }
5874
5875 /*
5876 * Control the frequency of IDC lock retries
5877 */
5878 #define QLA83XX_WAIT_LOGIC_MS 100
5879
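/*
 * Forcefully recover the IDC lock: claim recovery stage 1 with our port
 * number, wait, and if we still own the recovery, force an IDC unlock
 * and clear the lock-id and lock-recovery registers.
 */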
5880 static int
5881 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
5882 {
5883 int rval;
5884 uint32_t data;
5885 uint32_t idc_lck_rcvry_stage_mask = 0x3;
5886 uint32_t idc_lck_rcvry_owner_mask = 0x3c;
5887 struct qla_hw_data *ha = base_vha->hw;
5888
5889 ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
5890 "Trying force recovery of the IDC lock.\n");
5891
5892 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
5893 if (rval)
5894 return rval;
5895
5896 if ((data & idc_lck_rcvry_stage_mask) > 0) {
5897 return QLA_SUCCESS;
5898 } else {
5899 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
5900 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
5901 data);
5902 if (rval)
5903 return rval;
5904
5905 msleep(200);
5906
5907 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
5908 &data);
5909 if (rval)
5910 return rval;
5911
5912 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
5913 data &= (IDC_LOCK_RECOVERY_STAGE2 |
5914 ~(idc_lck_rcvry_stage_mask));
5915 rval = qla83xx_wr_reg(base_vha,
5916 QLA83XX_IDC_LOCK_RECOVERY, data);
5917 if (rval)
5918 return rval;
5919
5920 /* Forcefully perform IDC UnLock */
5921 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
5922 &data);
5923 if (rval)
5924 return rval;
5925 /* Clear lock-id by setting 0xff */
5926 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5927 0xff);
5928 if (rval)
5929 return rval;
5930 /* Clear lock-recovery by setting 0x0 */
5931 rval = qla83xx_wr_reg(base_vha,
5932 QLA83XX_IDC_LOCK_RECOVERY, 0x0);
5933 if (rval)
5934 return rval;
5935 } else
5936 return QLA_SUCCESS;
5937 }
5938
5939 return rval;
5940 }
5941
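/*
 * Watch the driver lock-id register for a change of ownership; if it
 * stays unchanged past QLA83XX_MAX_LOCK_RECOVERY_WAIT, force IDC lock
 * recovery.
 */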
5942 static int
5943 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
5944 {
5945 int rval = QLA_SUCCESS;
5946 uint32_t o_drv_lockid, n_drv_lockid;
5947 unsigned long lock_recovery_timeout;
5948
5949 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
5950 retry_lockid:
5951 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
5952 if (rval)
5953 goto exit;
5954
5955 /* MAX wait time before forcing IDC Lock recovery = 2 secs */
5956 if (time_after_eq(jiffies, lock_recovery_timeout)) {
5957 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
5958 return QLA_SUCCESS;
5959 else
5960 return QLA_FUNCTION_FAILED;
5961 }
5962
5963 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
5964 if (rval)
5965 goto exit;
5966
5967 if (o_drv_lockid == n_drv_lockid) {
5968 msleep(QLA83XX_WAIT_LOGIC_MS);
5969 goto retry_lockid;
5970 } else
5971 return QLA_SUCCESS;
5972
5973 exit:
5974 return rval;
5975 }
5976
5977 /*
5978 * Context: task, can sleep
5979 */
5980 void
5981 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
5982 {
5983 uint32_t data;
5984 uint32_t lock_owner;
5985 struct qla_hw_data *ha = base_vha->hw;
5986
5987 might_sleep();
5988
5989 /* IDC-lock implementation using driver-lock/lock-id remote registers */
5990 retry_lock:
5991 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
5992 == QLA_SUCCESS) {
5993 if (data) {
5994 /* Setting lock-id to our function-number */
5995 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5996 ha->portnum);
5997 } else {
5998 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5999 &lock_owner);
6000 ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
6001 "Failed to acquire IDC lock, acquired by %d, "
6002 "retrying...\n", lock_owner);
6003
6004 /* Retry/Perform IDC-Lock recovery */
6005 if (qla83xx_idc_lock_recovery(base_vha)
6006 == QLA_SUCCESS) {
6007 msleep(QLA83XX_WAIT_LOGIC_MS);
6008 goto retry_lock;
6009 } else
6010 ql_log(ql_log_warn, base_vha, 0xb075,
6011 "IDC Lock recovery FAILED.\n");
6012 }
6013
6014 }
6015
6016 return;
6017 }
6018
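/*
 * Return true when the RDP response must be reduced to 256 bytes: the
 * request came from the Domain Controller (0xFFFC01), the port is not
 * logged in, and the firmware does not support the full 2048-byte
 * response.
 */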
6019 static bool
6020 qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
6021 struct purex_entry_24xx *purex)
6022 {
6023 char fwstr[16];
6024 u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
6025 struct port_database_24xx *pdb;
6026
6027 /* Domain Controller is always logged-out. */
6028 /* if RDP request is not from Domain Controller: */
6029 if (sid != 0xfffc01)
6030 return false;
6031
6032 ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
6033
6034 pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
6035 if (!pdb) {
6036 ql_dbg(ql_dbg_init, vha, 0x0181,
6037 "%s: Failed allocate pdb\n", __func__);
6038 } else if (qla24xx_get_port_database(vha,
6039 le16_to_cpu(purex->nport_handle), pdb)) {
6040 ql_dbg(ql_dbg_init, vha, 0x0181,
6041 "%s: Failed get pdb sid=%x\n", __func__, sid);
6042 } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
6043 pdb->current_login_state != PDS_PRLI_COMPLETE) {
6044 ql_dbg(ql_dbg_init, vha, 0x0181,
6045 "%s: Port not logged in sid=%#x\n", __func__, sid);
6046 } else {
6047 /* RDP request is from logged in port */
6048 kfree(pdb);
6049 return false;
6050 }
6051 kfree(pdb);
6052
6053 vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
6054 fwstr[strcspn(fwstr, " ")] = 0;
6055 /* if FW version allows RDP response length up to 2048 bytes: */
6056 if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
6057 return false;
6058
6059 ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
6060
6061 /* RDP response length is to be reduced to maximum 256 bytes */
6062 return true;
6063 }
6064
6065 /*
6066 * Function Name: qla24xx_process_purex_rdp
6067 *
6068 * Description:
6069 * Prepare an RDP response and send it to the Fabric switch
6070 *
6071 * PARAMETERS:
6072 * vha: SCSI qla host
6073 * item: purex item carrying the RDP request received by the HBA
6074 */
6075 void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
6076 struct purex_item *item)
6077 {
6078 struct qla_hw_data *ha = vha->hw;
6079 struct purex_entry_24xx *purex =
6080 (struct purex_entry_24xx *)&item->iocb;
6081 dma_addr_t rsp_els_dma;
6082 dma_addr_t rsp_payload_dma;
6083 dma_addr_t stat_dma;
6084 dma_addr_t sfp_dma;
6085 struct els_entry_24xx *rsp_els = NULL;
6086 struct rdp_rsp_payload *rsp_payload = NULL;
6087 struct link_statistics *stat = NULL;
6088 uint8_t *sfp = NULL;
6089 uint16_t sfp_flags = 0;
6090 uint rsp_payload_length = sizeof(*rsp_payload);
6091 int rval;
6092
6093 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
6094 "%s: Enter\n", __func__);
6095
6096 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
6097 "-------- ELS REQ -------\n");
6098 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
6099 purex, sizeof(*purex));
6100
6101 if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
6102 rsp_payload_length =
6103 offsetof(typeof(*rsp_payload), optical_elmt_desc);
6104 ql_dbg(ql_dbg_init, vha, 0x0181,
6105 "Reducing RSP payload length to %u bytes...\n",
6106 rsp_payload_length);
6107 }
6108
6109 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
6110 &rsp_els_dma, GFP_KERNEL);
6111 if (!rsp_els) {
6112 ql_log(ql_log_warn, vha, 0x0183,
6113 "Failed allocate dma buffer ELS RSP.\n");
6114 goto dealloc;
6115 }
6116
6117 rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
6118 &rsp_payload_dma, GFP_KERNEL);
6119 if (!rsp_payload) {
6120 ql_log(ql_log_warn, vha, 0x0184,
6121 "Failed allocate dma buffer ELS RSP payload.\n");
6122 goto dealloc;
6123 }
6124
6125 sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
6126 &sfp_dma, GFP_KERNEL);
6127
6128 stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
6129 &stat_dma, GFP_KERNEL);
6130
6131 /* Prepare Response IOCB */
6132 rsp_els->entry_type = ELS_IOCB_TYPE;
6133 rsp_els->entry_count = 1;
6134 rsp_els->sys_define = 0;
6135 rsp_els->entry_status = 0;
6136 rsp_els->handle = 0;
6137 rsp_els->nport_handle = purex->nport_handle;
6138 rsp_els->tx_dsd_count = cpu_to_le16(1);
6139 rsp_els->vp_index = purex->vp_idx;
6140 rsp_els->sof_type = EST_SOFI3;
6141 rsp_els->rx_xchg_address = purex->rx_xchg_addr;
6142 rsp_els->rx_dsd_count = 0;
6143 rsp_els->opcode = purex->els_frame_payload[0];
6144
6145 rsp_els->d_id[0] = purex->s_id[0];
6146 rsp_els->d_id[1] = purex->s_id[1];
6147 rsp_els->d_id[2] = purex->s_id[2];
6148
6149 rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
6150 rsp_els->rx_byte_count = 0;
6151 rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
6152
6153 put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
6154 rsp_els->tx_len = rsp_els->tx_byte_count;
6155
6156 rsp_els->rx_address = 0;
6157 rsp_els->rx_len = 0;
6158
6159 /* Prepare Response Payload */
6160 rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
6161 rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
6162 sizeof(rsp_payload->hdr));
6163
6164 /* Link service Request Info Descriptor */
6165 rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
6166 rsp_payload->ls_req_info_desc.desc_len =
6167 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
6168 rsp_payload->ls_req_info_desc.req_payload_word_0 =
6169 cpu_to_be32p((uint32_t *)purex->els_frame_payload);
6170
6171 /* Link service Request Info Descriptor 2 */
6172 rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
6173 rsp_payload->ls_req_info_desc2.desc_len =
6174 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
6175 rsp_payload->ls_req_info_desc2.req_payload_word_0 =
6176 cpu_to_be32p((uint32_t *)purex->els_frame_payload);
6177
6178
6179 rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
6180 rsp_payload->sfp_diag_desc.desc_len =
6181 cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
6182
6183 if (sfp) {
6184 /* SFP Flags */
6185 memset(sfp, 0, SFP_RTDI_LEN);
6186 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
6187 if (!rval) {
6188 /* SFP Flags bits 3-0: Port Tx Laser Type */
6189 if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
6190 sfp_flags |= BIT_0; /* short wave */
6191 else if (sfp[0] & BIT_1)
6192 sfp_flags |= BIT_1; /* long wave 1310nm */
6193 else if (sfp[1] & BIT_4)
6194 sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
6195 }
6196
6197 /* SFP Type */
6198 memset(sfp, 0, SFP_RTDI_LEN);
6199 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
6200 if (!rval) {
6201 sfp_flags |= BIT_4; /* optical */
6202 if (sfp[0] == 0x3)
6203 sfp_flags |= BIT_6; /* sfp+ */
6204 }
6205
6206 rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
6207
6208 /* SFP Diagnostics */
6209 memset(sfp, 0, SFP_RTDI_LEN);
6210 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
6211 if (!rval) {
6212 __be16 *trx = (__force __be16 *)sfp; /* already be16 */
6213 rsp_payload->sfp_diag_desc.temperature = trx[0];
6214 rsp_payload->sfp_diag_desc.vcc = trx[1];
6215 rsp_payload->sfp_diag_desc.tx_bias = trx[2];
6216 rsp_payload->sfp_diag_desc.tx_power = trx[3];
6217 rsp_payload->sfp_diag_desc.rx_power = trx[4];
6218 }
6219 }
6220
6221 /* Port Speed Descriptor */
6222 rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
6223 rsp_payload->port_speed_desc.desc_len =
6224 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
6225 rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
6226 qla25xx_fdmi_port_speed_capability(ha));
6227 rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
6228 qla25xx_fdmi_port_speed_currently(ha));
6229
6230 /* Link Error Status Descriptor */
6231 rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
6232 rsp_payload->ls_err_desc.desc_len =
6233 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
6234
6235 if (stat) {
6236 rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
6237 if (!rval) {
6238 rsp_payload->ls_err_desc.link_fail_cnt =
6239 cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
6240 rsp_payload->ls_err_desc.loss_sync_cnt =
6241 cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
6242 rsp_payload->ls_err_desc.loss_sig_cnt =
6243 cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
6244 rsp_payload->ls_err_desc.prim_seq_err_cnt =
6245 cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
6246 rsp_payload->ls_err_desc.inval_xmit_word_cnt =
6247 cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
6248 rsp_payload->ls_err_desc.inval_crc_cnt =
6249 cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
6250 rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
6251 }
6252 }
6253
6254 /* Portname Descriptor */
6255 rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
6256 rsp_payload->port_name_diag_desc.desc_len =
6257 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
6258 memcpy(rsp_payload->port_name_diag_desc.WWNN,
6259 vha->node_name,
6260 sizeof(rsp_payload->port_name_diag_desc.WWNN));
6261 memcpy(rsp_payload->port_name_diag_desc.WWPN,
6262 vha->port_name,
6263 sizeof(rsp_payload->port_name_diag_desc.WWPN));
6264
6265 /* F-Port Portname Descriptor */
6266 rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
6267 rsp_payload->port_name_direct_desc.desc_len =
6268 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
6269 memcpy(rsp_payload->port_name_direct_desc.WWNN,
6270 vha->fabric_node_name,
6271 sizeof(rsp_payload->port_name_direct_desc.WWNN));
6272 memcpy(rsp_payload->port_name_direct_desc.WWPN,
6273 vha->fabric_port_name,
6274 sizeof(rsp_payload->port_name_direct_desc.WWPN));
6275
6276 /* Buffer Credit Descriptor */
6277 rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
6278 rsp_payload->buffer_credit_desc.desc_len =
6279 cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
6280 rsp_payload->buffer_credit_desc.fcport_b2b = 0;
6281 rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
6282 rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
6283
6284 if (ha->flags.plogi_template_valid) {
6285 uint32_t tmp =
6286 be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
6287 rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
6288 }
6289
6290 if (rsp_payload_length < sizeof(*rsp_payload))
6291 goto send;
6292
6293 /* Optical Element Descriptor, Temperature */
6294 rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
6295 rsp_payload->optical_elmt_desc[0].desc_len =
6296 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6297 /* Optical Element Descriptor, Voltage */
6298 rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
6299 rsp_payload->optical_elmt_desc[1].desc_len =
6300 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6301 /* Optical Element Descriptor, Tx Bias Current */
6302 rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
6303 rsp_payload->optical_elmt_desc[2].desc_len =
6304 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6305 /* Optical Element Descriptor, Tx Power */
6306 rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
6307 rsp_payload->optical_elmt_desc[3].desc_len =
6308 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6309 /* Optical Element Descriptor, Rx Power */
6310 rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
6311 rsp_payload->optical_elmt_desc[4].desc_len =
6312 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6313
6314 if (sfp) {
6315 memset(sfp, 0, SFP_RTDI_LEN);
6316 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
6317 if (!rval) {
6318 __be16 *trx = (__force __be16 *)sfp; /* already be16 */
6319
6320 /* Optical Element Descriptor, Temperature */
6321 rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
6322 rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
6323 rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
6324 rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
6325 rsp_payload->optical_elmt_desc[0].element_flags =
6326 cpu_to_be32(1 << 28);
6327
6328 /* Optical Element Descriptor, Voltage */
6329 rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
6330 rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
6331 rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
6332 rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
6333 rsp_payload->optical_elmt_desc[1].element_flags =
6334 cpu_to_be32(2 << 28);
6335
6336 /* Optical Element Descriptor, Tx Bias Current */
6337 rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
6338 rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
6339 rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
6340 rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
6341 rsp_payload->optical_elmt_desc[2].element_flags =
6342 cpu_to_be32(3 << 28);
6343
6344 /* Optical Element Descriptor, Tx Power */
6345 rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
6346 rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
6347 rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
6348 rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
6349 rsp_payload->optical_elmt_desc[3].element_flags =
6350 cpu_to_be32(4 << 28);
6351
6352 /* Optical Element Descriptor, Rx Power */
6353 rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
6354 rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
6355 rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
6356 rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
6357 rsp_payload->optical_elmt_desc[4].element_flags =
6358 cpu_to_be32(5 << 28);
6359 }
6360
6361 memset(sfp, 0, SFP_RTDI_LEN);
6362 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
6363 if (!rval) {
6364 /* Temperature high/low alarm/warning */
6365 rsp_payload->optical_elmt_desc[0].element_flags |=
6366 cpu_to_be32(
6367 (sfp[0] >> 7 & 1) << 3 |
6368 (sfp[0] >> 6 & 1) << 2 |
6369 (sfp[4] >> 7 & 1) << 1 |
6370 (sfp[4] >> 6 & 1) << 0);
6371
6372 /* Voltage high/low alarm/warning */
6373 rsp_payload->optical_elmt_desc[1].element_flags |=
6374 cpu_to_be32(
6375 (sfp[0] >> 5 & 1) << 3 |
6376 (sfp[0] >> 4 & 1) << 2 |
6377 (sfp[4] >> 5 & 1) << 1 |
6378 (sfp[4] >> 4 & 1) << 0);
6379
6380 /* Tx Bias Current high/low alarm/warning */
6381 rsp_payload->optical_elmt_desc[2].element_flags |=
6382 cpu_to_be32(
6383 (sfp[0] >> 3 & 1) << 3 |
6384 (sfp[0] >> 2 & 1) << 2 |
6385 (sfp[4] >> 3 & 1) << 1 |
6386 (sfp[4] >> 2 & 1) << 0);
6387
6388 /* Tx Power high/low alarm/warning */
6389 rsp_payload->optical_elmt_desc[3].element_flags |=
6390 cpu_to_be32(
6391 (sfp[0] >> 1 & 1) << 3 |
6392 (sfp[0] >> 0 & 1) << 2 |
6393 (sfp[4] >> 1 & 1) << 1 |
6394 (sfp[4] >> 0 & 1) << 0);
6395
6396 /* Rx Power high/low alarm/warning */
6397 rsp_payload->optical_elmt_desc[4].element_flags |=
6398 cpu_to_be32(
6399 (sfp[1] >> 7 & 1) << 3 |
6400 (sfp[1] >> 6 & 1) << 2 |
6401 (sfp[5] >> 7 & 1) << 1 |
6402 (sfp[5] >> 6 & 1) << 0);
6403 }
6404 }
6405
6406 /* Optical Product Data Descriptor */
6407 rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
6408 rsp_payload->optical_prod_desc.desc_len =
6409 cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
6410
6411 if (sfp) {
6412 memset(sfp, 0, SFP_RTDI_LEN);
6413 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
6414 if (!rval) {
6415 memcpy(rsp_payload->optical_prod_desc.vendor_name,
6416 sfp + 0,
6417 sizeof(rsp_payload->optical_prod_desc.vendor_name));
6418 memcpy(rsp_payload->optical_prod_desc.part_number,
6419 sfp + 20,
6420 sizeof(rsp_payload->optical_prod_desc.part_number));
6421 memcpy(rsp_payload->optical_prod_desc.revision,
6422 sfp + 36,
6423 sizeof(rsp_payload->optical_prod_desc.revision));
6424 memcpy(rsp_payload->optical_prod_desc.serial_number,
6425 sfp + 48,
6426 sizeof(rsp_payload->optical_prod_desc.serial_number));
6427 }
6428
6429 memset(sfp, 0, SFP_RTDI_LEN);
6430 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
6431 if (!rval) {
6432 memcpy(rsp_payload->optical_prod_desc.date,
6433 sfp + 0,
6434 sizeof(rsp_payload->optical_prod_desc.date));
6435 }
6436 }
6437
6438 send:
6439 ql_dbg(ql_dbg_init, vha, 0x0183,
6440 "Sending ELS Response to RDP Request...\n");
6441 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
6442 "-------- ELS RSP -------\n");
6443 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
6444 rsp_els, sizeof(*rsp_els));
6445 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
6446 "-------- ELS RSP PAYLOAD -------\n");
6447 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
6448 rsp_payload, rsp_payload_length);
6449
6450 rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
6451
6452 if (rval) {
6453 ql_log(ql_log_warn, vha, 0x0188,
6454 "%s: iocb failed to execute -> %x\n", __func__, rval);
6455 } else if (rsp_els->comp_status) {
6456 ql_log(ql_log_warn, vha, 0x0189,
6457 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
6458 __func__, rsp_els->comp_status,
6459 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
6460 } else {
6461 ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
6462 }
6463
6464 dealloc:
6465 if (stat)
6466 dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
6467 stat, stat_dma);
6468 if (sfp)
6469 dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
6470 sfp, sfp_dma);
6471 if (rsp_payload)
6472 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
6473 rsp_payload, rsp_payload_dma);
6474 if (rsp_els)
6475 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
6476 rsp_els, rsp_els_dma);
6477 }
6478
6479 void
6480 qla24xx_free_purex_item(struct purex_item *item)
6481 {
6482 if (item == &item->vha->default_item)
6483 memset(&item->vha->default_item, 0, sizeof(struct purex_item));
6484 else
6485 kfree(item);
6486 }
6487
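/*
 * Splice the purex list under its lock and process each queued item via
 * its process_item() callback, freeing the item afterwards.
 */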
6488 void qla24xx_process_purex_list(struct purex_list *list)
6489 {
6490 struct list_head head = LIST_HEAD_INIT(head);
6491 struct purex_item *item, *next;
6492 ulong flags;
6493
6494 spin_lock_irqsave(&list->lock, flags);
6495 list_splice_init(&list->head, &head);
6496 spin_unlock_irqrestore(&list->lock, flags);
6497
6498 list_for_each_entry_safe(item, next, &head, list) {
6499 list_del(&item->list);
6500 item->process_item(item->vha, item);
6501 qla24xx_free_purex_item(item);
6502 }
6503 }
6504
6505 /*
6506 * Context: task, can sleep
6507 */
6508 void
6509 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
6510 {
6511 #if 0
6512 uint16_t options = (requester_id << 15) | BIT_7;
6513 #endif
6514 uint16_t retry;
6515 uint32_t data;
6516 struct qla_hw_data *ha = base_vha->hw;
6517
6518 might_sleep();
6519
6520 /* IDC-unlock implementation using driver-unlock/lock-id
6521 * remote registers
6522 */
6523 retry = 0;
6524 retry_unlock:
6525 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
6526 == QLA_SUCCESS) {
6527 if (data == ha->portnum) {
6528 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
6529 /* Clearing lock-id by setting 0xff */
6530 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
6531 } else if (retry < 10) {
6532 /* SV: XXX: IDC unlock retrying needed here? */
6533
6534 /* Retry for IDC-unlock */
6535 msleep(QLA83XX_WAIT_LOGIC_MS);
6536 retry++;
6537 ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
6538 "Failed to release IDC lock, retrying=%d\n", retry);
6539 goto retry_unlock;
6540 }
6541 } else if (retry < 10) {
6542 /* Retry for IDC-unlock */
6543 msleep(QLA83XX_WAIT_LOGIC_MS);
6544 retry++;
6545 ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
6546 "Failed to read drv-lockid, retrying=%d\n", retry);
6547 goto retry_unlock;
6548 }
6549
6550 return;
6551
6552 #if 0
6553 /* XXX: IDC-unlock implementation using access-control mbx */
6554 retry = 0;
6555 retry_unlock2:
6556 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
6557 if (retry < 10) {
6558 /* Retry for IDC-unlock */
6559 msleep(QLA83XX_WAIT_LOGIC_MS);
6560 retry++;
6561 ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
6562 "Failed to release IDC lock, retrying=%d\n", retry);
6563 goto retry_unlock2;
6564 }
6565 }
6566
6567 return;
6568 #endif
6569 }
6570
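/*
 * Set this function's bit in the IDC DRV-PRESENCE register. The __
 * variant expects the caller to hold the IDC lock.
 */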
6571 int
6572 __qla83xx_set_drv_presence(scsi_qla_host_t *vha)
6573 {
6574 int rval = QLA_SUCCESS;
6575 struct qla_hw_data *ha = vha->hw;
6576 uint32_t drv_presence;
6577
6578 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6579 if (rval == QLA_SUCCESS) {
6580 drv_presence |= (1 << ha->portnum);
6581 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6582 drv_presence);
6583 }
6584
6585 return rval;
6586 }
6587
6588 int
6589 qla83xx_set_drv_presence(scsi_qla_host_t *vha)
6590 {
6591 int rval = QLA_SUCCESS;
6592
6593 qla83xx_idc_lock(vha, 0);
6594 rval = __qla83xx_set_drv_presence(vha);
6595 qla83xx_idc_unlock(vha, 0);
6596
6597 return rval;
6598 }
6599
6600 int
6601 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
6602 {
6603 int rval = QLA_SUCCESS;
6604 struct qla_hw_data *ha = vha->hw;
6605 uint32_t drv_presence;
6606
6607 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6608 if (rval == QLA_SUCCESS) {
6609 drv_presence &= ~(1 << ha->portnum);
6610 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6611 drv_presence);
6612 }
6613
6614 return rval;
6615 }
6616
6617 int
6618 qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
6619 {
6620 int rval = QLA_SUCCESS;
6621
6622 qla83xx_idc_lock(vha, 0);
6623 rval = __qla83xx_clear_drv_presence(vha);
6624 qla83xx_idc_unlock(vha, 0);
6625
6626 return rval;
6627 }
6628
6629 static void
6630 qla83xx_need_reset_handler(scsi_qla_host_t *vha)
6631 {
6632 struct qla_hw_data *ha = vha->hw;
6633 uint32_t drv_ack, drv_presence;
6634 unsigned long ack_timeout;
6635
6636 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
6637 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
6638 while (1) {
6639 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6640 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6641 if ((drv_ack & drv_presence) == drv_presence)
6642 break;
6643
6644 if (time_after_eq(jiffies, ack_timeout)) {
6645 ql_log(ql_log_warn, vha, 0xb067,
6646 "RESET ACK TIMEOUT! drv_presence=0x%x "
6647 "drv_ack=0x%x\n", drv_presence, drv_ack);
6648 /*
6649 * The function(s) which did not ack in time are forced
6650 * to withdraw any further participation in the IDC
6651 * reset.
6652 */
6653 if (drv_ack != drv_presence)
6654 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6655 drv_ack);
6656 break;
6657 }
6658
6659 qla83xx_idc_unlock(vha, 0);
6660 msleep(1000);
6661 qla83xx_idc_lock(vha, 0);
6662 }
6663
6664 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
6665 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
6666 }
6667
6668 static int
6669 qla83xx_device_bootstrap(scsi_qla_host_t *vha)
6670 {
6671 int rval = QLA_SUCCESS;
6672 uint32_t idc_control;
6673
6674 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
6675 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
6676
6677 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
6678 __qla83xx_get_idc_control(vha, &idc_control);
6679 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
6680 __qla83xx_set_idc_control(vha, 0);
6681
6682 qla83xx_idc_unlock(vha, 0);
6683 rval = qla83xx_restart_nic_firmware(vha);
6684 qla83xx_idc_lock(vha, 0);
6685
6686 if (rval != QLA_SUCCESS) {
6687 ql_log(ql_log_fatal, vha, 0xb06a,
6688 "Failed to restart NIC f/w.\n");
6689 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
6690 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
6691 } else {
6692 ql_dbg(ql_dbg_p3p, vha, 0xb06c,
6693 "Success in restarting nic f/w.\n");
6694 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
6695 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
6696 }
6697
6698 return rval;
6699 }
6700
6701 /* Assumes idc_lock always held on entry */
6702 int
6703 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
6704 {
6705 struct qla_hw_data *ha = base_vha->hw;
6706 int rval = QLA_SUCCESS;
6707 unsigned long dev_init_timeout;
6708 uint32_t dev_state;
6709
6710 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
6711 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
6712
6713 while (1) {
6714
6715 if (time_after_eq(jiffies, dev_init_timeout)) {
6716 ql_log(ql_log_warn, base_vha, 0xb06e,
6717 "Initialization TIMEOUT!\n");
6718 /* Init timeout. Disable further NIC Core
6719 * communication.
6720 */
6721 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
6722 QLA8XXX_DEV_FAILED);
6723 ql_log(ql_log_info, base_vha, 0xb06f,
6724 "HW State: FAILED.\n");
6725 }
6726
6727 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6728 switch (dev_state) {
6729 case QLA8XXX_DEV_READY:
6730 if (ha->flags.nic_core_reset_owner)
6731 qla83xx_idc_audit(base_vha,
6732 IDC_AUDIT_COMPLETION);
6733 ha->flags.nic_core_reset_owner = 0;
6734 ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
6735 "Reset_owner reset by 0x%x.\n",
6736 ha->portnum);
6737 goto exit;
6738 case QLA8XXX_DEV_COLD:
6739 if (ha->flags.nic_core_reset_owner)
6740 rval = qla83xx_device_bootstrap(base_vha);
6741 else {
6742 /* Wait for AEN to change device-state */
6743 qla83xx_idc_unlock(base_vha, 0);
6744 msleep(1000);
6745 qla83xx_idc_lock(base_vha, 0);
6746 }
6747 break;
6748 case QLA8XXX_DEV_INITIALIZING:
6749 /* Wait for AEN to change device-state */
6750 qla83xx_idc_unlock(base_vha, 0);
6751 msleep(1000);
6752 qla83xx_idc_lock(base_vha, 0);
6753 break;
6754 case QLA8XXX_DEV_NEED_RESET:
6755 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
6756 qla83xx_need_reset_handler(base_vha);
6757 else {
6758 /* Wait for AEN to change device-state */
6759 qla83xx_idc_unlock(base_vha, 0);
6760 msleep(1000);
6761 qla83xx_idc_lock(base_vha, 0);
6762 }
6763 /* reset timeout value after need reset handler */
6764 dev_init_timeout = jiffies +
6765 (ha->fcoe_dev_init_timeout * HZ);
6766 break;
6767 case QLA8XXX_DEV_NEED_QUIESCENT:
6768 /* XXX: DEBUG for now */
6769 qla83xx_idc_unlock(base_vha, 0);
6770 msleep(1000);
6771 qla83xx_idc_lock(base_vha, 0);
6772 break;
6773 case QLA8XXX_DEV_QUIESCENT:
6774 /* XXX: DEBUG for now */
6775 if (ha->flags.quiesce_owner)
6776 goto exit;
6777
6778 qla83xx_idc_unlock(base_vha, 0);
6779 msleep(1000);
6780 qla83xx_idc_lock(base_vha, 0);
6781 dev_init_timeout = jiffies +
6782 (ha->fcoe_dev_init_timeout * HZ);
6783 break;
6784 case QLA8XXX_DEV_FAILED:
6785 if (ha->flags.nic_core_reset_owner)
6786 qla83xx_idc_audit(base_vha,
6787 IDC_AUDIT_COMPLETION);
6788 ha->flags.nic_core_reset_owner = 0;
6789 __qla83xx_clear_drv_presence(base_vha);
6790 qla83xx_idc_unlock(base_vha, 0);
6791 qla8xxx_dev_failed_handler(base_vha);
6792 rval = QLA_FUNCTION_FAILED;
6793 qla83xx_idc_lock(base_vha, 0);
6794 goto exit;
6795 case QLA8XXX_BAD_VALUE:
6796 qla83xx_idc_unlock(base_vha, 0);
6797 msleep(1000);
6798 qla83xx_idc_lock(base_vha, 0);
6799 break;
6800 default:
6801 ql_log(ql_log_warn, base_vha, 0xb071,
6802 "Unknown Device State: %x.\n", dev_state);
6803 qla83xx_idc_unlock(base_vha, 0);
6804 qla8xxx_dev_failed_handler(base_vha);
6805 rval = QLA_FUNCTION_FAILED;
6806 qla83xx_idc_lock(base_vha, 0);
6807 goto exit;
6808 }
6809 }
6810
6811 exit:
6812 return rval;
6813 }
6814
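/*
 * Work: disable the adapter after an unrecoverable PCI error: tear down
 * vports, outstanding commands, sysfs/dfs entries, the SCSI host,
 * queues, IRQs and memory, then disable the PCI device.
 */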
6815 void
6816 qla2x00_disable_board_on_pci_error(struct work_struct *work)
6817 {
6818 struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
6819 board_disable);
6820 struct pci_dev *pdev = ha->pdev;
6821 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6822
6823 ql_log(ql_log_warn, base_vha, 0x015b,
6824 "Disabling adapter.\n");
6825
6826 if (!atomic_read(&pdev->enable_cnt)) {
6827 ql_log(ql_log_info, base_vha, 0xfffc,
6828 "PCI device disabled, no action req for PCI error=%lx\n",
6829 base_vha->pci_flags);
6830 return;
6831 }
6832
6833 /*
6834 * if UNLOADING flag is already set, then continue unload,
6835 * where it was set first.
6836 */
6837 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
6838 return;
6839
6840 qla2x00_wait_for_sess_deletion(base_vha);
6841
6842 qla2x00_delete_all_vps(ha, base_vha);
6843
6844 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
6845
6846 qla2x00_dfs_remove(base_vha);
6847
6848 qla84xx_put_chip(base_vha);
6849
6850 if (base_vha->timer_active)
6851 qla2x00_stop_timer(base_vha);
6852
6853 base_vha->flags.online = 0;
6854
6855 qla2x00_destroy_deferred_work(ha);
6856
6857 /*
6858 * Do not try to stop beacon blink as it will issue a mailbox
6859 * command.
6860 */
6861 qla2x00_free_sysfs_attr(base_vha, false);
6862
6863 fc_remove_host(base_vha->host);
6864
6865 scsi_remove_host(base_vha->host);
6866
6867 base_vha->flags.init_done = 0;
6868 qla25xx_delete_queues(base_vha);
6869 qla2x00_free_fcports(base_vha);
6870 qla2x00_free_irqs(base_vha);
6871 qla2x00_mem_free(ha);
6872 qla82xx_md_free(base_vha);
6873 qla2x00_free_queues(ha);
6874
6875 qla2x00_unmap_iobases(ha);
6876
6877 pci_release_selected_regions(ha->pdev, ha->bars);
6878 pci_disable_device(pdev);
6879
6880 /*
6881 * Let qla2x00_remove_one clean up qla_hw_data on device removal.
6882 */
6883 }
6884
6885 /**************************************************************************
6886 * qla2x00_do_dpc
6887 * This kernel thread is a task that is scheduled by the interrupt handler
6888 * to perform the background processing for interrupts.
6889 *
6890 * Notes:
6891 * This task always runs in the context of a kernel thread. It
6892 * is kicked off by the driver's detect code and starts up
6893 * one per adapter. It immediately goes to sleep and waits for
6894 * some fibre event. When either the interrupt handler or
6895 * the timer routine detects an event, it will set one of the task
6896 * bits and then wake us up.
6897 **************************************************************************/
6898 static int
6899 qla2x00_do_dpc(void *data)
6900 {
6901 scsi_qla_host_t *base_vha;
6902 struct qla_hw_data *ha;
6903 uint32_t online;
6904 struct qla_qpair *qpair;
6905
6906 ha = (struct qla_hw_data *)data;
6907 base_vha = pci_get_drvdata(ha->pdev);
6908
6909 set_user_nice(current, MIN_NICE);
6910
6911 set_current_state(TASK_INTERRUPTIBLE);
6912 while (!kthread_should_stop()) {
6913 ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
6914 "DPC handler sleeping.\n");
6915
6916 schedule();
6917
6918 if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
6919 qla_pci_set_eeh_busy(base_vha);
6920
6921 if (!base_vha->flags.init_done || ha->flags.mbox_busy)
6922 goto end_loop;
6923
6924 if (ha->flags.eeh_busy) {
6925 ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
6926 "eeh_busy=%d.\n", ha->flags.eeh_busy);
6927 goto end_loop;
6928 }
6929
6930 ha->dpc_active = 1;
6931
6932 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
6933 "DPC handler waking up, dpc_flags=0x%lx.\n",
6934 base_vha->dpc_flags);
6935
6936 if (test_bit(UNLOADING, &base_vha->dpc_flags))
6937 break;
6938
6939 if (IS_P3P_TYPE(ha)) {
6940 if (IS_QLA8044(ha)) {
6941 if (test_and_clear_bit(ISP_UNRECOVERABLE,
6942 &base_vha->dpc_flags)) {
6943 qla8044_idc_lock(ha);
6944 qla8044_wr_direct(base_vha,
6945 QLA8044_CRB_DEV_STATE_INDEX,
6946 QLA8XXX_DEV_FAILED);
6947 qla8044_idc_unlock(ha);
6948 ql_log(ql_log_info, base_vha, 0x4004,
6949 "HW State: FAILED.\n");
6950 qla8044_device_state_handler(base_vha);
6951 continue;
6952 }
6953
6954 } else {
6955 if (test_and_clear_bit(ISP_UNRECOVERABLE,
6956 &base_vha->dpc_flags)) {
6957 qla82xx_idc_lock(ha);
6958 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6959 QLA8XXX_DEV_FAILED);
6960 qla82xx_idc_unlock(ha);
6961 ql_log(ql_log_info, base_vha, 0x0151,
6962 "HW State: FAILED.\n");
6963 qla82xx_device_state_handler(base_vha);
6964 continue;
6965 }
6966 }
6967
6968 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
6969 &base_vha->dpc_flags)) {
6970
6971 ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
6972 "FCoE context reset scheduled.\n");
6973 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
6974 &base_vha->dpc_flags))) {
6975 if (qla82xx_fcoe_ctx_reset(base_vha)) {
6976 /* FCoE-ctx reset failed.
6977 * Escalate to chip-reset
6978 */
6979 set_bit(ISP_ABORT_NEEDED,
6980 &base_vha->dpc_flags);
6981 }
6982 clear_bit(ABORT_ISP_ACTIVE,
6983 &base_vha->dpc_flags);
6984 }
6985
6986 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
6987 "FCoE context reset end.\n");
6988 }
6989 } else if (IS_QLAFX00(ha)) {
6990 if (test_and_clear_bit(ISP_UNRECOVERABLE,
6991 &base_vha->dpc_flags)) {
6992 ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
6993 "Firmware Reset Recovery\n");
6994 if (qlafx00_reset_initialize(base_vha)) {
6995 /* Failed. Abort isp later. */
6996 if (!test_bit(UNLOADING,
6997 &base_vha->dpc_flags)) {
6998 set_bit(ISP_UNRECOVERABLE,
6999 &base_vha->dpc_flags);
7000 ql_dbg(ql_dbg_dpc, base_vha,
7001 0x4021,
7002 "Reset Recovery Failed\n");
7003 }
7004 }
7005 }
7006
7007 if (test_and_clear_bit(FX00_TARGET_SCAN,
7008 &base_vha->dpc_flags)) {
7009 ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
7010 "ISPFx00 Target Scan scheduled\n");
7011 if (qlafx00_rescan_isp(base_vha)) {
7012 if (!test_bit(UNLOADING,
7013 &base_vha->dpc_flags))
7014 set_bit(ISP_UNRECOVERABLE,
7015 &base_vha->dpc_flags);
7016 ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
7017 "ISPFx00 Target Scan Failed\n");
7018 }
7019 ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
7020 "ISPFx00 Target Scan End\n");
7021 }
7022 if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
7023 &base_vha->dpc_flags)) {
7024 ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
7025 "ISPFx00 Host Info resend scheduled\n");
7026 qlafx00_fx_disc(base_vha,
7027 &base_vha->hw->mr.fcport,
7028 FXDISC_REG_HOST_INFO);
7029 }
7030 }
7031
7032 if (test_and_clear_bit(DETECT_SFP_CHANGE,
7033 &base_vha->dpc_flags)) {
7034 /* Semantic:
7035 * - NO-OP -- await next ISP-ABORT. Preferred method
7036 * to minimize disruptions that will occur
7037 * when a forced chip-reset occurs.
7038 * - Force -- ISP-ABORT scheduled.
7039 */
7040 /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
7041 }
7042
7043 if (test_and_clear_bit
7044 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
7045 !test_bit(UNLOADING, &base_vha->dpc_flags)) {
7046 bool do_reset = true;
7047
7048 switch (base_vha->qlini_mode) {
7049 case QLA2XXX_INI_MODE_ENABLED:
7050 break;
7051 case QLA2XXX_INI_MODE_DISABLED:
7052 if (!qla_tgt_mode_enabled(base_vha) &&
7053 !ha->flags.fw_started)
7054 do_reset = false;
7055 break;
7056 case QLA2XXX_INI_MODE_DUAL:
7057 if (!qla_dual_mode_enabled(base_vha) &&
7058 !ha->flags.fw_started)
7059 do_reset = false;
7060 break;
7061 default:
7062 break;
7063 }
7064
7065 if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
7066 &base_vha->dpc_flags))) {
7067 base_vha->flags.online = 1;
7068 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
7069 "ISP abort scheduled.\n");
7070 if (ha->isp_ops->abort_isp(base_vha)) {
7071 /* failed. retry later */
7072 set_bit(ISP_ABORT_NEEDED,
7073 &base_vha->dpc_flags);
7074 }
7075 clear_bit(ABORT_ISP_ACTIVE,
7076 &base_vha->dpc_flags);
7077 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
7078 "ISP abort end.\n");
7079 }
7080 }
7081
7082 if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
7083 if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
7084 qla24xx_process_purex_list
7085 (&base_vha->purex_list);
7086 clear_bit(PROCESS_PUREX_IOCB,
7087 &base_vha->dpc_flags);
7088 }
7089 }
7090
7091 if (IS_QLAFX00(ha))
7092 goto loop_resync_check;
7093
7094 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
7095 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
7096 "Quiescence mode scheduled.\n");
7097 if (IS_P3P_TYPE(ha)) {
7098 if (IS_QLA82XX(ha))
7099 qla82xx_device_state_handler(base_vha);
7100 if (IS_QLA8044(ha))
7101 qla8044_device_state_handler(base_vha);
7102 clear_bit(ISP_QUIESCE_NEEDED,
7103 &base_vha->dpc_flags);
7104 if (!ha->flags.quiesce_owner) {
7105 qla2x00_perform_loop_resync(base_vha);
7106 if (IS_QLA82XX(ha)) {
7107 qla82xx_idc_lock(ha);
7108 qla82xx_clear_qsnt_ready(
7109 base_vha);
7110 qla82xx_idc_unlock(ha);
7111 } else if (IS_QLA8044(ha)) {
7112 qla8044_idc_lock(ha);
7113 qla8044_clear_qsnt_ready(
7114 base_vha);
7115 qla8044_idc_unlock(ha);
7116 }
7117 }
7118 } else {
7119 clear_bit(ISP_QUIESCE_NEEDED,
7120 &base_vha->dpc_flags);
7121 qla2x00_quiesce_io(base_vha);
7122 }
7123 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
7124 "Quiescence mode end.\n");
7125 }
7126
7127 if (test_and_clear_bit(RESET_MARKER_NEEDED,
7128 &base_vha->dpc_flags) &&
7129 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
7130
7131 ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
7132 "Reset marker scheduled.\n");
7133 qla2x00_rst_aen(base_vha);
7134 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
7135 ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
7136 "Reset marker end.\n");
7137 }
7138
7139 /* Retry each device up to login retry count */
7140 if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
7141 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
7142 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
7143
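/* Rate-limit relogin scheduling to at most once per second. */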
7144 if (!base_vha->relogin_jif ||
7145 time_after_eq(jiffies, base_vha->relogin_jif)) {
7146 base_vha->relogin_jif = jiffies + HZ;
7147 clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
7148
7149 ql_dbg(ql_dbg_disc, base_vha, 0x400d,
7150 "Relogin scheduled.\n");
7151 qla24xx_post_relogin_work(base_vha);
7152 }
7153 }
7154 loop_resync_check:
7155 if (!qla2x00_reset_active(base_vha) &&
7156 test_and_clear_bit(LOOP_RESYNC_NEEDED,
7157 &base_vha->dpc_flags)) {
7158 /*
7159 * Allow abort_isp to complete before moving on to scanning.
7160 */
7161 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
7162 "Loop resync scheduled.\n");
7163
7164 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
7165 &base_vha->dpc_flags))) {
7166
7167 qla2x00_loop_resync(base_vha);
7168
7169 clear_bit(LOOP_RESYNC_ACTIVE,
7170 &base_vha->dpc_flags);
7171 }
7172
7173 ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
7174 "Loop resync end.\n");
7175 }
7176
7177 if (IS_QLAFX00(ha))
7178 goto intr_on_check;
7179
7180 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
7181 atomic_read(&base_vha->loop_state) == LOOP_READY) {
7182 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
7183 qla2xxx_flash_npiv_conf(base_vha);
7184 }
7185
7186 intr_on_check:
7187 if (!ha->interrupts_on)
7188 ha->isp_ops->enable_intrs(ha);
7189
7190 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
7191 &base_vha->dpc_flags)) {
7192 if (ha->beacon_blink_led == 1)
7193 ha->isp_ops->beacon_blink(base_vha);
7194 }
7195
7196 /* qpair online check */
7197 if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
7198 &base_vha->dpc_flags)) {
7199 if (ha->flags.eeh_busy ||
7200 ha->flags.pci_channel_io_perm_failure)
7201 online = 0;
7202 else
7203 online = 1;
7204
7205 mutex_lock(&ha->mq_lock);
7206 list_for_each_entry(qpair, &base_vha->qp_list,
7207 qp_list_elem)
7208 qpair->online = online;
7209 mutex_unlock(&ha->mq_lock);
7210 }
7211
7212 if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
7213 &base_vha->dpc_flags)) {
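/*
 * New exchange threshold: last reported NVMe AEN count plus the last
 * programmed ZIO threshold, capped at the firmware's original exchange count.
 */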
7214 u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
7215
7216 if (threshold > ha->orig_fw_xcb_count)
7217 threshold = ha->orig_fw_xcb_count;
7218
7219 ql_log(ql_log_info, base_vha, 0xffffff,
7220 "SET ZIO Activity exchange threshold to %d.\n",
7221 threshold);
7222 if (qla27xx_set_zio_threshold(base_vha, threshold)) {
7223 ql_log(ql_log_info, base_vha, 0xffffff,
7224 "Unable to SET ZIO Activity exchange threshold to %d.\n",
7225 threshold);
7226 }
7227 }
7228
7229 if (!IS_QLAFX00(ha))
7230 qla2x00_do_dpc_all_vps(base_vha);
7231
7232 if (test_and_clear_bit(N2N_LINK_RESET,
7233 &base_vha->dpc_flags)) {
7234 qla2x00_lip_reset(base_vha);
7235 }
7236
7237 ha->dpc_active = 0;
7238 end_loop:
7239 set_current_state(TASK_INTERRUPTIBLE);
7240 } /* End of while(1) */
7241 __set_current_state(TASK_RUNNING);
7242
7243 ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
7244 "DPC handler exiting.\n");
7245
7246 /*
7247 * Make sure that nobody tries to wake us up again.
7248 */
7249 ha->dpc_active = 0;
7250
7251 /* Cleanup any residual CTX SRBs. */
7252 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
7253
7254 return 0;
7255 }
7256
7257 void
7258 qla2xxx_wake_dpc(struct scsi_qla_host *vha)
7259 {
7260 struct qla_hw_data *ha = vha->hw;
7261 struct task_struct *t = ha->dpc_thread;
7262
7263 if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
7264 wake_up_process(t);
7265 }
7266
7267 /*
7268 * qla2x00_rst_aen
7269 * Processes asynchronous reset.
7270 *
7271 * Input:
7272 * vha = adapter block pointer.
7273 */
7274 static void
7275 qla2x00_rst_aen(scsi_qla_host_t *vha)
7276 {
7277 if (vha->flags.online && !vha->flags.reset_active &&
7278 !atomic_read(&vha->loop_down_timer) &&
7279 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
7280 do {
7281 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7282
7283 /*
7284 * Issue marker command only when we are going to start
7285 * the I/O.
7286 */
7287 vha->marker_needed = 1;
7288 } while (!atomic_read(&vha->loop_down_timer) &&
7289 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
7290 }
7291 }
7292
7293 static bool qla_do_heartbeat(struct scsi_qla_host *vha)
7294 {
7295 struct qla_hw_data *ha = vha->hw;
7296 u32 cmpl_cnt;
7297 u16 i;
7298 bool do_heartbeat = false;
7299
7300 /*
7301 * Allow do_heartbeat only if we don't have any active interrupts,
7302 * but there are still I/Os outstanding with the firmware.
7303 */
7304 cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
7305 if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
7306 cmpl_cnt != ha->base_qpair->cmd_cnt) {
7307 do_heartbeat = true;
7308 goto skip;
7309 }
7310 ha->base_qpair->prev_completion_cnt = cmpl_cnt;
7311
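/* Repeat the same stall check for every additional queue pair. */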
7312 for (i = 0; i < ha->max_qpairs; i++) {
7313 if (ha->queue_pair_map[i]) {
7314 cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
7315 if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
7316 cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
7317 do_heartbeat = true;
7318 break;
7319 }
7320 ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
7321 }
7322 }
7323
7324 skip:
7325 return do_heartbeat;
7326 }
7327
7328 static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
7329 {
7330 struct qla_hw_data *ha = vha->hw;
7331
7332 if (vha->vp_idx)
7333 return;
7334
7335 if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
7336 return;
7337
7338 /*
7339 * The dpc thread cannot run while the heartbeat task is running. We
7340 * also do not want to starve the heartbeat task, so run it at least
7341 * once every 5 seconds even if dpc work has started.
7342 */
7343 if (dpc_started &&
7344 time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
7345 return;
7346
7347 if (qla_do_heartbeat(vha)) {
7348 ha->last_heartbeat_run_jiffies = jiffies;
7349 queue_work(ha->wq, &ha->heartbeat_work);
7350 }
7351 }
7352
7353 static void qla_wind_down_chip(scsi_qla_host_t *vha)
7354 {
7355 struct qla_hw_data *ha = vha->hw;
7356
7357 if (!ha->flags.eeh_busy)
7358 return;
7359 if (ha->pci_error_state)
7360 /* system is trying to recover */
7361 return;
7362
7363 /*
7364 * The system is not handling the PCIe error. At this point, make a
7365 * best-effort attempt to wind down the adapter.
7366 */
7367 if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
7368 !ha->flags.eeh_flush) {
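/* Stage 1: reset the chip and disable interrupts to quiesce the adapter. */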
7369 ql_log(ql_log_info, vha, 0x9009,
7370 "PCI Error detected, attempting to reset hardware.\n");
7371
7372 ha->isp_ops->reset_chip(vha);
7373 ha->isp_ops->disable_intrs(ha);
7374
7375 ha->flags.eeh_flush = EEH_FLUSH_RDY;
7376 ha->eeh_jif = jiffies;
7377
7378 } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
7379 time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) {
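/* Stage 2: five seconds later, stop bus mastering and abort all outstanding commands. */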
7380 pci_clear_master(ha->pdev);
7381
7382 /* flush all commands */
7383 qla2x00_abort_isp_cleanup(vha);
7384 ha->flags.eeh_flush = EEH_FLUSH_DONE;
7385
7386 ql_log(ql_log_info, vha, 0x900a,
7387 "PCI Error handling complete, all IOs aborted.\n");
7388 }
7389 }
7390
7391 /**************************************************************************
7392 * qla2x00_timer
7393 *
7394 * Description:
7395 * One second timer
7396 *
7397 * Context: Interrupt
7398 ***************************************************************************/
7399 void
7400 qla2x00_timer(struct timer_list *t)
7401 {
7402 scsi_qla_host_t *vha = from_timer(vha, t, timer);
7403 unsigned long cpu_flags = 0;
7404 int start_dpc = 0;
7405 int index;
7406 srb_t *sp;
7407 uint16_t w;
7408 struct qla_hw_data *ha = vha->hw;
7409 struct req_que *req;
7410 unsigned long flags;
7411 fc_port_t *fcport = NULL;
7412
7413 if (ha->flags.eeh_busy) {
7414 qla_wind_down_chip(vha);
7415
7416 ql_dbg(ql_dbg_timer, vha, 0x6000,
7417 "EEH = %d, restarting timer.\n",
7418 ha->flags.eeh_busy);
7419 qla2x00_restart_timer(vha, WATCH_INTERVAL);
7420 return;
7421 }
7422
7423 /*
7424 * Do a hardware read so that any EEH error pending from earlier mailbox
7425 * waits is raised now. If the read returns all ones (-1), the board is disabled.
7426 */
7427 if (!pci_channel_offline(ha->pdev)) {
7428 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
7429 qla2x00_check_reg16_for_disconnect(vha, w);
7430 }
7431
7432 /* Make sure qla82xx_watchdog is run only for physical port */
7433 if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
7434 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
7435 start_dpc++;
7436 if (IS_QLA82XX(ha))
7437 qla82xx_watchdog(vha);
7438 else if (IS_QLA8044(ha))
7439 qla8044_watchdog(vha);
7440 }
7441
7442 if (!vha->vp_idx && IS_QLAFX00(ha))
7443 qlafx00_timer_routine(vha);
7444
7445 if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
7446 vha->link_down_time++;
7447
7448 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
7449 list_for_each_entry(fcport, &vha->vp_fcports, list) {
7450 if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
7451 fcport->tgt_link_down_time++;
7452 }
7453 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
7454
7455 /* Loop down handler. */
7456 if (atomic_read(&vha->loop_down_timer) > 0 &&
7457 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
7458 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
7459 && vha->flags.online) {
7460
7461 if (atomic_read(&vha->loop_down_timer) ==
7462 vha->loop_down_abort_time) {
7463
7464 ql_log(ql_log_info, vha, 0x6008,
7465 "Loop down - aborting the queues before time expires.\n");
7466
7467 if (!IS_QLA2100(ha) && vha->link_down_timeout)
7468 atomic_set(&vha->loop_state, LOOP_DEAD);
7469
7470 /*
7471 * Schedule an ISP abort to return any FCP2-device
7472 * commands.
7473 */
7474 /* NPIV - scan physical port only */
7475 if (!vha->vp_idx) {
7476 spin_lock_irqsave(&ha->hardware_lock,
7477 cpu_flags);
7478 req = ha->req_q_map[0];
7479 for (index = 1;
7480 index < req->num_outstanding_cmds;
7481 index++) {
7482 fc_port_t *sfcp;
7483
7484 sp = req->outstanding_cmds[index];
7485 if (!sp)
7486 continue;
7487 if (sp->cmd_type != TYPE_SRB)
7488 continue;
7489 if (sp->type != SRB_SCSI_CMD)
7490 continue;
7491 sfcp = sp->fcport;
7492 if (!(sfcp->flags & FCF_FCP2_DEVICE))
7493 continue;
7494
7495 if (IS_QLA82XX(ha))
7496 set_bit(FCOE_CTX_RESET_NEEDED,
7497 &vha->dpc_flags);
7498 else
7499 set_bit(ISP_ABORT_NEEDED,
7500 &vha->dpc_flags);
7501 break;
7502 }
7503 spin_unlock_irqrestore(&ha->hardware_lock,
7504 cpu_flags);
7505 }
7506 start_dpc++;
7507 }
7508
7509 /* if the loop has been down for 4 minutes, reinit adapter */
7510 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
7511 if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
7512 ql_log(ql_log_warn, vha, 0x6009,
7513 "Loop down - aborting ISP.\n");
7514
7515 if (IS_QLA82XX(ha))
7516 set_bit(FCOE_CTX_RESET_NEEDED,
7517 &vha->dpc_flags);
7518 else
7519 set_bit(ISP_ABORT_NEEDED,
7520 &vha->dpc_flags);
7521 }
7522 }
7523 ql_dbg(ql_dbg_timer, vha, 0x600a,
7524 "Loop down - seconds remaining %d.\n",
7525 atomic_read(&vha->loop_down_timer));
7526 }
7527 /* Check if beacon LED needs to be blinked for physical host only */
7528 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
7529 /* There is no beacon_blink function for ISP82xx */
7530 if (!IS_P3P_TYPE(ha)) {
7531 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
7532 start_dpc++;
7533 }
7534 }
7535
7536 /* check if edif running */
7537 if (vha->hw->flags.edif_enabled)
7538 qla_edif_timer(vha);
7539
7540 /* Process any deferred work. */
7541 if (!list_empty(&vha->work_list)) {
7542 unsigned long flags;
7543 bool q = false;
7544
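/* Queue iocb_work only if it is not already pending (IOCB_WORK_ACTIVE). */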
7545 spin_lock_irqsave(&vha->work_lock, flags);
7546 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
7547 q = true;
7548 spin_unlock_irqrestore(&vha->work_lock, flags);
7549 if (q)
7550 queue_work(vha->hw->wq, &vha->iocb_work);
7551 }
7552
7553 /*
7554 * FC-NVME
7555 * see if the active AEN count has changed from what was last reported.
7556 */
7557 index = atomic_read(&ha->nvme_active_aen_cnt);
7558 if (!vha->vp_idx &&
7559 (index != ha->nvme_last_rptd_aen) &&
7560 ha->zio_mode == QLA_ZIO_MODE_6 &&
7561 !ha->flags.host_shutting_down) {
7562 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
7563 ql_log(ql_log_info, vha, 0x3002,
7564 "nvme: Sched: Set ZIO exchange threshold to %d.\n",
7565 ha->nvme_last_rptd_aen);
7566 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
7567 start_dpc++;
7568 }
7569
7570 if (!vha->vp_idx &&
7571 atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
7572 IS_ZIO_THRESHOLD_CAPABLE(ha)) {
7573 ql_log(ql_log_info, vha, 0x3002,
7574 "Sched: Set ZIO exchange threshold to %d.\n",
7575 ha->last_zio_threshold);
7576 ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
7577 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
7578 start_dpc++;
7579 }
7580 qla_adjust_buf(vha);
7581
7582 /* Reuse w to record whether the dpc thread will be woken. */
7583 w = 0;
7584 /* Schedule the DPC routine if needed */
7585 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
7586 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
7587 start_dpc ||
7588 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
7589 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
7590 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
7591 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
7592 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
7593 test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
7594 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
7595 ql_dbg(ql_dbg_timer, vha, 0x600b,
7596 "isp_abort_needed=%d loop_resync_needed=%d "
7597 "start_dpc=%d reset_marker_needed=%d",
7598 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
7599 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
7600 start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
7601 ql_dbg(ql_dbg_timer, vha, 0x600c,
7602 "beacon_blink_needed=%d isp_unrecoverable=%d "
7603 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
7604 "relogin_needed=%d, Process_purex_iocb=%d.\n",
7605 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
7606 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
7607 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
7608 test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
7609 test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
7610 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
7611 qla2xxx_wake_dpc(vha);
7612 w = 1;
7613 }
7614
7615 qla_heart_beat(vha, w);
7616
7617 qla2x00_restart_timer(vha, WATCH_INTERVAL);
7618 }
7619
7620 /* Firmware interface routines. */
7621
7622 #define FW_ISP21XX 0
7623 #define FW_ISP22XX 1
7624 #define FW_ISP2300 2
7625 #define FW_ISP2322 3
7626 #define FW_ISP24XX 4
7627 #define FW_ISP25XX 5
7628 #define FW_ISP81XX 6
7629 #define FW_ISP82XX 7
7630 #define FW_ISP2031 8
7631 #define FW_ISP8031 9
7632 #define FW_ISP27XX 10
7633 #define FW_ISP28XX 11
7634
7635 #define FW_FILE_ISP21XX "ql2100_fw.bin"
7636 #define FW_FILE_ISP22XX "ql2200_fw.bin"
7637 #define FW_FILE_ISP2300 "ql2300_fw.bin"
7638 #define FW_FILE_ISP2322 "ql2322_fw.bin"
7639 #define FW_FILE_ISP24XX "ql2400_fw.bin"
7640 #define FW_FILE_ISP25XX "ql2500_fw.bin"
7641 #define FW_FILE_ISP81XX "ql8100_fw.bin"
7642 #define FW_FILE_ISP82XX "ql8200_fw.bin"
7643 #define FW_FILE_ISP2031 "ql2600_fw.bin"
7644 #define FW_FILE_ISP8031 "ql8300_fw.bin"
7645 #define FW_FILE_ISP27XX "ql2700_fw.bin"
7646 #define FW_FILE_ISP28XX "ql2800_fw.bin"
7647
7648
7649 static DEFINE_MUTEX(qla_fw_lock);
7650
7651 static struct fw_blob qla_fw_blobs[] = {
7652 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
7653 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
7654 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
7655 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
7656 { .name = FW_FILE_ISP24XX, },
7657 { .name = FW_FILE_ISP25XX, },
7658 { .name = FW_FILE_ISP81XX, },
7659 { .name = FW_FILE_ISP82XX, },
7660 { .name = FW_FILE_ISP2031, },
7661 { .name = FW_FILE_ISP8031, },
7662 { .name = FW_FILE_ISP27XX, },
7663 { .name = FW_FILE_ISP28XX, },
7664 { .name = NULL, },
7665 };
7666
7667 struct fw_blob *
7668 qla2x00_request_firmware(scsi_qla_host_t *vha)
7669 {
7670 struct qla_hw_data *ha = vha->hw;
7671 struct fw_blob *blob;
7672
7673 if (IS_QLA2100(ha)) {
7674 blob = &qla_fw_blobs[FW_ISP21XX];
7675 } else if (IS_QLA2200(ha)) {
7676 blob = &qla_fw_blobs[FW_ISP22XX];
7677 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
7678 blob = &qla_fw_blobs[FW_ISP2300];
7679 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
7680 blob = &qla_fw_blobs[FW_ISP2322];
7681 } else if (IS_QLA24XX_TYPE(ha)) {
7682 blob = &qla_fw_blobs[FW_ISP24XX];
7683 } else if (IS_QLA25XX(ha)) {
7684 blob = &qla_fw_blobs[FW_ISP25XX];
7685 } else if (IS_QLA81XX(ha)) {
7686 blob = &qla_fw_blobs[FW_ISP81XX];
7687 } else if (IS_QLA82XX(ha)) {
7688 blob = &qla_fw_blobs[FW_ISP82XX];
7689 } else if (IS_QLA2031(ha)) {
7690 blob = &qla_fw_blobs[FW_ISP2031];
7691 } else if (IS_QLA8031(ha)) {
7692 blob = &qla_fw_blobs[FW_ISP8031];
7693 } else if (IS_QLA27XX(ha)) {
7694 blob = &qla_fw_blobs[FW_ISP27XX];
7695 } else if (IS_QLA28XX(ha)) {
7696 blob = &qla_fw_blobs[FW_ISP28XX];
7697 } else {
7698 return NULL;
7699 }
7700
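/* A blob without a name means no firmware file is defined for this ISP. */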
7701 if (!blob->name)
7702 return NULL;
7703
7704 mutex_lock(&qla_fw_lock);
7705 if (blob->fw)
7706 goto out;
7707
7708 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
7709 ql_log(ql_log_warn, vha, 0x0063,
7710 "Failed to load firmware image (%s).\n", blob->name);
7711 blob->fw = NULL;
7712 blob = NULL;
7713 }
7714
7715 out:
7716 mutex_unlock(&qla_fw_lock);
7717 return blob;
7718 }
7719
7720 static void
7721 qla2x00_release_firmware(void)
7722 {
7723 struct fw_blob *blob;
7724
7725 mutex_lock(&qla_fw_lock);
7726 for (blob = qla_fw_blobs; blob->name; blob++)
7727 release_firmware(blob->fw);
7728 mutex_unlock(&qla_fw_lock);
7729 }
7730
7731 static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
7732 {
7733 struct qla_hw_data *ha = vha->hw;
7734 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
7735 struct qla_qpair *qpair = NULL;
7736 struct scsi_qla_host *vp, *tvp;
7737 fc_port_t *fcport;
7738 int i;
7739 unsigned long flags;
7740
7741 ql_dbg(ql_dbg_aer, vha, 0x9000,
7742 "%s\n", __func__);
7743 ha->chip_reset++;
7744
7745 ha->base_qpair->chip_reset = ha->chip_reset;
7746 for (i = 0; i < ha->max_qpairs; i++) {
7747 if (ha->queue_pair_map[i])
7748 ha->queue_pair_map[i]->chip_reset =
7749 ha->base_qpair->chip_reset;
7750 }
7751
7752 /*
7753 * Purging the mailbox might take a while; the slot reset / chip
7754 * reset will take care of the purge.
7755 */
7756
7757 mutex_lock(&ha->mq_lock);
7758 ha->base_qpair->online = 0;
7759 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7760 qpair->online = 0;
7761 wmb();
7762 mutex_unlock(&ha->mq_lock);
7763
7764 qla2x00_mark_all_devices_lost(vha);
7765
7766 spin_lock_irqsave(&ha->vport_slock, flags);
7767 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7768 atomic_inc(&vp->vref_count);
7769 spin_unlock_irqrestore(&ha->vport_slock, flags);
7770 qla2x00_mark_all_devices_lost(vp);
7771 spin_lock_irqsave(&ha->vport_slock, flags);
7772 atomic_dec(&vp->vref_count);
7773 }
7774 spin_unlock_irqrestore(&ha->vport_slock, flags);
7775
7776 /* Clear all async request states across all VPs. */
7777 list_for_each_entry(fcport, &vha->vp_fcports, list)
7778 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7779
7780 spin_lock_irqsave(&ha->vport_slock, flags);
7781 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7782 atomic_inc(&vp->vref_count);
7783 spin_unlock_irqrestore(&ha->vport_slock, flags);
7784 list_for_each_entry(fcport, &vp->vp_fcports, list)
7785 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7786 spin_lock_irqsave(&ha->vport_slock, flags);
7787 atomic_dec(&vp->vref_count);
7788 }
7789 spin_unlock_irqrestore(&ha->vport_slock, flags);
7790 }
7791
7792
7793 static pci_ers_result_t
7794 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7795 {
7796 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
7797 struct qla_hw_data *ha = vha->hw;
7798 pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
7799
7800 ql_log(ql_log_warn, vha, 0x9000,
7801 "PCI error detected, state %x.\n", state);
7802 ha->pci_error_state = QLA_PCI_ERR_DETECTED;
7803
7804 if (!atomic_read(&pdev->enable_cnt)) {
7805 ql_log(ql_log_info, vha, 0xffff,
7806 "PCI device is disabled,state %x\n", state);
7807 ret = PCI_ERS_RESULT_NEED_RESET;
7808 goto out;
7809 }
7810
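/* Map the reported PCI channel state to an EEH recovery action. */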
7811 switch (state) {
7812 case pci_channel_io_normal:
7813 qla_pci_set_eeh_busy(vha);
7814 if (ql2xmqsupport || ql2xnvmeenable) {
7815 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
7816 qla2xxx_wake_dpc(vha);
7817 }
7818 ret = PCI_ERS_RESULT_CAN_RECOVER;
7819 break;
7820 case pci_channel_io_frozen:
7821 qla_pci_set_eeh_busy(vha);
7822 ret = PCI_ERS_RESULT_NEED_RESET;
7823 break;
7824 case pci_channel_io_perm_failure:
7825 ha->flags.pci_channel_io_perm_failure = 1;
7826 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
7827 if (ql2xmqsupport || ql2xnvmeenable) {
7828 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
7829 qla2xxx_wake_dpc(vha);
7830 }
7831 ret = PCI_ERS_RESULT_DISCONNECT;
7832 }
7833 out:
7834 ql_dbg(ql_dbg_aer, vha, 0x600d,
7835 "PCI error detected returning [%x].\n", ret);
7836 return ret;
7837 }
7838
7839 static pci_ers_result_t
7840 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
7841 {
7842 int risc_paused = 0;
7843 uint32_t stat;
7844 unsigned long flags;
7845 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7846 struct qla_hw_data *ha = base_vha->hw;
7847 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7848 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
7849
7850 ql_log(ql_log_warn, base_vha, 0x9000,
7851 "mmio enabled\n");
7852
7853 ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
7854
7855 if (IS_QLA82XX(ha))
7856 return PCI_ERS_RESULT_RECOVERED;
7857
7858 if (qla2x00_isp_reg_stat(ha)) {
7859 ql_log(ql_log_info, base_vha, 0x803f,
7860 "During mmio enabled, PCI/Register disconnect still detected.\n");
7861 goto out;
7862 }
7863
7864 spin_lock_irqsave(&ha->hardware_lock, flags);
7865 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
7866 stat = rd_reg_word(&reg->hccr);
7867 if (stat & HCCR_RISC_PAUSE)
7868 risc_paused = 1;
7869 } else if (IS_QLA23XX(ha)) {
7870 stat = rd_reg_dword(&reg->u.isp2300.host_status);
7871 if (stat & HSR_RISC_PAUSED)
7872 risc_paused = 1;
7873 } else if (IS_FWI2_CAPABLE(ha)) {
7874 stat = rd_reg_dword(&reg24->host_status);
7875 if (stat & HSRX_RISC_PAUSED)
7876 risc_paused = 1;
7877 }
7878 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7879
7880 if (risc_paused) {
7881 ql_log(ql_log_info, base_vha, 0x9003,
7882 "RISC paused -- mmio_enabled, Dumping firmware.\n");
7883 qla2xxx_dump_fw(base_vha);
7884 }
7885 out:
7886 /* Set PCI_ERS_RESULT_NEED_RESET to trigger a call to qla2xxx_pci_slot_reset(). */
7887 ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7888 "mmio enabled returning.\n");
7889 return PCI_ERS_RESULT_NEED_RESET;
7890 }
7891
7892 static pci_ers_result_t
7893 qla2xxx_pci_slot_reset(struct pci_dev *pdev)
7894 {
7895 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
7896 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7897 struct qla_hw_data *ha = base_vha->hw;
7898 int rc;
7899 struct qla_qpair *qpair = NULL;
7900
7901 ql_log(ql_log_warn, base_vha, 0x9004,
7902 "Slot Reset.\n");
7903
7904 ha->pci_error_state = QLA_PCI_SLOT_RESET;
7905 /* Workaround: the qla2xxx driver accesses hardware early in the
7906 * recovery path and needs the error state to be pci_channel_io_normal;
7907 * otherwise mailbox commands time out.
7908 */
7909 pdev->error_state = pci_channel_io_normal;
7910
7911 pci_restore_state(pdev);
7912
7913 /* pci_restore_state() clears the device's saved_state flag, so save
7914 * the restored state again to set the flag for any later restore.
7915 */
7916 pci_save_state(pdev);
7917
7918 if (ha->mem_only)
7919 rc = pci_enable_device_mem(pdev);
7920 else
7921 rc = pci_enable_device(pdev);
7922
7923 if (rc) {
7924 ql_log(ql_log_warn, base_vha, 0x9005,
7925 "Can't re-enable PCI device after reset.\n");
7926 goto exit_slot_reset;
7927 }
7928
7929
7930 if (ha->isp_ops->pci_config(base_vha))
7931 goto exit_slot_reset;
7932
7933 mutex_lock(&ha->mq_lock);
7934 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7935 qpair->online = 1;
7936 mutex_unlock(&ha->mq_lock);
7937
7938 ha->flags.eeh_busy = 0;
7939 base_vha->flags.online = 1;
7940 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7941 ha->isp_ops->abort_isp(base_vha);
7942 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7943
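/*
 * If register reads still indicate a disconnected device, the slot
 * reset did not bring the adapter back; mark EEH busy and clean up.
 */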
7944 if (qla2x00_isp_reg_stat(ha)) {
7945 ha->flags.eeh_busy = 1;
7946 qla_pci_error_cleanup(base_vha);
7947 ql_log(ql_log_warn, base_vha, 0x9005,
7948 "Device unable to recover from PCI error.\n");
7949 } else {
7950 ret = PCI_ERS_RESULT_RECOVERED;
7951 }
7952
7953 exit_slot_reset:
7954 ql_dbg(ql_dbg_aer, base_vha, 0x900e,
7955 "Slot Reset returning %x.\n", ret);
7956
7957 return ret;
7958 }
7959
7960 static void
7961 qla2xxx_pci_resume(struct pci_dev *pdev)
7962 {
7963 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7964 struct qla_hw_data *ha = base_vha->hw;
7965 int ret;
7966
7967 ql_log(ql_log_warn, base_vha, 0x900f,
7968 "Pci Resume.\n");
7969
7970
7971 ret = qla2x00_wait_for_hba_online(base_vha);
7972 if (ret != QLA_SUCCESS) {
7973 ql_log(ql_log_fatal, base_vha, 0x9002,
7974 "The device failed to resume I/O from slot/link_reset.\n");
7975 }
7976 ha->pci_error_state = QLA_PCI_RESUME;
7977 ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7978 "Pci Resume returning.\n");
7979 }
7980
7981 void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
7982 {
7983 struct qla_hw_data *ha = vha->hw;
7984 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7985 bool do_cleanup = false;
7986 unsigned long flags;
7987
7988 if (ha->flags.eeh_busy)
7989 return;
7990
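/*
 * Re-check under work_lock so that only the first caller marks the
 * adapter EEH-busy and performs the cleanup.
 */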
7991 spin_lock_irqsave(&base_vha->work_lock, flags);
7992 if (!ha->flags.eeh_busy) {
7993 ha->eeh_jif = jiffies;
7994 ha->flags.eeh_flush = 0;
7995
7996 ha->flags.eeh_busy = 1;
7997 do_cleanup = true;
7998 }
7999 spin_unlock_irqrestore(&base_vha->work_lock, flags);
8000
8001 if (do_cleanup)
8002 qla_pci_error_cleanup(base_vha);
8003 }
8004
8005 /*
8006 * This routine schedules a task to pause I/O. It may be called from
8007 * interrupt context when the caller sees a PCIe error event (register reads return all 0xf's).
8008 */
8009 void qla_schedule_eeh_work(struct scsi_qla_host *vha)
8010 {
8011 struct qla_hw_data *ha = vha->hw;
8012 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
8013
8014 if (ha->flags.eeh_busy)
8015 return;
8016
8017 set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
8018 qla2xxx_wake_dpc(base_vha);
8019 }
8020
8021 static void
8022 qla_pci_reset_prepare(struct pci_dev *pdev)
8023 {
8024 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
8025 struct qla_hw_data *ha = base_vha->hw;
8026 struct qla_qpair *qpair;
8027
8028 ql_log(ql_log_warn, base_vha, 0xffff,
8029 "%s.\n", __func__);
8030
8031 /*
8032 * A PCI FLR/function reset is about to reset the slot. Stop the
8033 * chip to halt all DMA access. It is assumed that pci_reset_done()
8034 * will be called after the FLR to resume chip operation.
8036 */
8037 ha->flags.eeh_busy = 1;
8038 mutex_lock(&ha->mq_lock);
8039 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
8040 qpair->online = 0;
8041 mutex_unlock(&ha->mq_lock);
8042
8043 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
8044 qla2x00_abort_isp_cleanup(base_vha);
8045 qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
8046 }
8047
8048 static void
8049 qla_pci_reset_done(struct pci_dev *pdev)
8050 {
8051 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
8052 struct qla_hw_data *ha = base_vha->hw;
8053 struct qla_qpair *qpair;
8054
8055 ql_log(ql_log_warn, base_vha, 0xffff,
8056 "%s.\n", __func__);
8057
8058 /*
8059 * The FLR has just completed in the PCI layer. Resume the adapter.
8060 */
8061 ha->flags.eeh_busy = 0;
8062 mutex_lock(&ha->mq_lock);
8063 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
8064 qpair->online = 1;
8065 mutex_unlock(&ha->mq_lock);
8066
8067 base_vha->flags.online = 1;
8068 ha->isp_ops->abort_isp(base_vha);
8069 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
8070 }
8071
8072 static void qla2xxx_map_queues(struct Scsi_Host *shost)
8073 {
8074 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
8075 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
8076
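/*
 * Use the default CPU-to-queue mapping when interrupts are user
 * controlled or there is no MQ I/O base; otherwise map hardware
 * queues to the PCI device's IRQ vectors.
 */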
8077 if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
8078 blk_mq_map_queues(qmap);
8079 else
8080 blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
8081 }
8082
8083 struct scsi_host_template qla2xxx_driver_template = {
8084 .module = THIS_MODULE,
8085 .name = QLA2XXX_DRIVER_NAME,
8086 .queuecommand = qla2xxx_queuecommand,
8087
8088 .eh_timed_out = fc_eh_timed_out,
8089 .eh_abort_handler = qla2xxx_eh_abort,
8090 .eh_should_retry_cmd = fc_eh_should_retry_cmd,
8091 .eh_device_reset_handler = qla2xxx_eh_device_reset,
8092 .eh_target_reset_handler = qla2xxx_eh_target_reset,
8093 .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
8094 .eh_host_reset_handler = qla2xxx_eh_host_reset,
8095
8096 .slave_configure = qla2xxx_slave_configure,
8097
8098 .slave_alloc = qla2xxx_slave_alloc,
8099 .slave_destroy = qla2xxx_slave_destroy,
8100 .scan_finished = qla2xxx_scan_finished,
8101 .scan_start = qla2xxx_scan_start,
8102 .change_queue_depth = scsi_change_queue_depth,
8103 .map_queues = qla2xxx_map_queues,
8104 .this_id = -1,
8105 .cmd_per_lun = 3,
8106 .sg_tablesize = SG_ALL,
8107
8108 .max_sectors = 0xFFFF,
8109 .shost_groups = qla2x00_host_groups,
8110
8111 .supported_mode = MODE_INITIATOR,
8112 .track_queue_depth = 1,
8113 .cmd_size = sizeof(srb_t),
8114 };
8115
8116 static const struct pci_error_handlers qla2xxx_err_handler = {
8117 .error_detected = qla2xxx_pci_error_detected,
8118 .mmio_enabled = qla2xxx_pci_mmio_enabled,
8119 .slot_reset = qla2xxx_pci_slot_reset,
8120 .resume = qla2xxx_pci_resume,
8121 .reset_prepare = qla_pci_reset_prepare,
8122 .reset_done = qla_pci_reset_done,
8123 };
8124
8125 static struct pci_device_id qla2xxx_pci_tbl[] = {
8126 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
8127 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
8128 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
8129 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
8130 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
8131 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
8132 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
8133 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
8134 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
8135 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
8136 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
8137 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
8138 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
8139 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
8140 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
8141 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
8142 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
8143 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
8144 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
8145 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
8146 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
8147 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
8148 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
8149 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
8150 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
8151 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
8152 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
8153 { 0 },
8154 };
8155 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
8156
8157 static struct pci_driver qla2xxx_pci_driver = {
8158 .name = QLA2XXX_DRIVER_NAME,
8159 .id_table = qla2xxx_pci_tbl,
8160 .probe = qla2x00_probe_one,
8161 .remove = qla2x00_remove_one,
8162 .shutdown = qla2x00_shutdown,
8163 .err_handler = &qla2xxx_err_handler,
8164 };
8165
8166 static const struct file_operations apidev_fops = {
8167 .owner = THIS_MODULE,
8168 .llseek = noop_llseek,
8169 };
8170
8171 /**
8172 * qla2x00_module_init - Module initialization.
8173 **/
8174 static int __init
8175 qla2x00_module_init(void)
8176 {
8177 int ret = 0;
8178
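/* Compile-time checks of firmware/hardware interface structure sizes. */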
8179 BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
8180 BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
8181 BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
8182 BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
8183 BUILD_BUG_ON(sizeof(init_cb_t) != 96);
8184 BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
8185 BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
8186 BUILD_BUG_ON(sizeof(request_t) != 64);
8187 BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
8188 BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
8189 BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
8190 BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
8191 BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
8192 BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
8193 BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
8194 BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
8195 BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
8196 BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
8197 BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
8198 BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
8199 BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
8200 BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
8201 BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
8202 BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
8203 BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
8204 BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
8205 BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
8206 BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
8207 BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
8208 BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
8209 BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
8210 BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
8211 BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
8212 BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
8213 BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
8214 BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
8215 BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
8216 BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
8217 BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
8218 BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
8219 BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
8220 BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
8221 BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
8222 BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
8223 BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
8224 BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
8225 BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
8226 BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
8227 BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
8228 BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
8229 BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
8230 BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
8231 BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
8232 BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
8233 BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
8234 BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
8235 BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
8236 BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
8237 BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
8238 BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
8239 BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
8240 BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
8241 BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
8242 BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
8243 BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
8244 BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
8245 BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
8246 BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
8247 BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
8248 BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
8249 BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
8250 BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
8251 BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
8252 BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
8253 BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
8254 BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
8255 BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
8256 BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
8257 BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
8258 BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
8259 BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
8260 BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
8261 BUILD_BUG_ON(sizeof(sw_info_t) != 32);
8262 BUILD_BUG_ON(sizeof(target_id_t) != 2);
8263
8264 qla_trace_init();
8265
8266 /* Allocate cache for SRBs. */
8267 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
8268 SLAB_HWCACHE_ALIGN, NULL);
8269 if (srb_cachep == NULL) {
8270 ql_log(ql_log_fatal, NULL, 0x0001,
8271 "Unable to allocate SRB cache...Failing load!.\n");
8272 return -ENOMEM;
8273 }
8274
8275 /* Initialize target kmem_cache and mem_pools */
8276 ret = qlt_init();
8277 if (ret < 0) {
8278 goto destroy_cache;
8279 } else if (ret > 0) {
8280 /*
8281 * If initiator mode is explicitly disabled by qlt_init(),
8282 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
8283 * performing scsi_scan_target() during LOOP UP event.
8284 */
8285 qla2xxx_transport_functions.disable_target_scan = 1;
8286 qla2xxx_transport_vport_functions.disable_target_scan = 1;
8287 }
8288
8289 /* Derive version string. */
8290 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
8291 if (ql2xextended_error_logging)
8292 strcat(qla2x00_version_str, "-debug");
8293 if (ql2xextended_error_logging == 1)
8294 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
8295
8296 qla2xxx_transport_template =
8297 fc_attach_transport(&qla2xxx_transport_functions);
8298 if (!qla2xxx_transport_template) {
8299 ql_log(ql_log_fatal, NULL, 0x0002,
8300 "fc_attach_transport failed...Failing load!.\n");
8301 ret = -ENODEV;
8302 goto qlt_exit;
8303 }
8304
8305 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
8306 if (apidev_major < 0) {
8307 ql_log(ql_log_fatal, NULL, 0x0003,
8308 "Unable to register char device %s.\n", QLA2XXX_APIDEV);
8309 }
8310
8311 qla2xxx_transport_vport_template =
8312 fc_attach_transport(&qla2xxx_transport_vport_functions);
8313 if (!qla2xxx_transport_vport_template) {
8314 ql_log(ql_log_fatal, NULL, 0x0004,
8315 "fc_attach_transport vport failed...Failing load!.\n");
8316 ret = -ENODEV;
8317 goto unreg_chrdev;
8318 }
8319 ql_log(ql_log_info, NULL, 0x0005,
8320 "QLogic Fibre Channel HBA Driver: %s.\n",
8321 qla2x00_version_str);
8322 ret = pci_register_driver(&qla2xxx_pci_driver);
8323 if (ret) {
8324 ql_log(ql_log_fatal, NULL, 0x0006,
8325 "pci_register_driver failed...ret=%d Failing load!.\n",
8326 ret);
8327 goto release_vport_transport;
8328 }
8329 return ret;
8330
8331 release_vport_transport:
8332 fc_release_transport(qla2xxx_transport_vport_template);
8333
8334 unreg_chrdev:
8335 if (apidev_major >= 0)
8336 unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
8337 fc_release_transport(qla2xxx_transport_template);
8338
8339 qlt_exit:
8340 qlt_exit();
8341
8342 destroy_cache:
8343 kmem_cache_destroy(srb_cachep);
8344
8345 qla_trace_uninit();
8346 return ret;
8347 }
8348
8349 /**
8350 * qla2x00_module_exit - Module cleanup.
8351 **/
8352 static void __exit
8353 qla2x00_module_exit(void)
8354 {
8355 pci_unregister_driver(&qla2xxx_pci_driver);
8356 qla2x00_release_firmware();
8357 kmem_cache_destroy(ctx_cachep);
8358 fc_release_transport(qla2xxx_transport_vport_template);
8359 if (apidev_major >= 0)
8360 unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
8361 fc_release_transport(qla2xxx_transport_template);
8362 qlt_exit();
8363 kmem_cache_destroy(srb_cachep);
8364 qla_trace_uninit();
8365 }
8366
8367 module_init(qla2x00_module_init);
8368 module_exit(qla2x00_module_exit);
8369
8370 MODULE_AUTHOR("QLogic Corporation");
8371 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
8372 MODULE_LICENSE("GPL");
8373 MODULE_FIRMWARE(FW_FILE_ISP21XX);
8374 MODULE_FIRMWARE(FW_FILE_ISP22XX);
8375 MODULE_FIRMWARE(FW_FILE_ISP2300);
8376 MODULE_FIRMWARE(FW_FILE_ISP2322);
8377 MODULE_FIRMWARE(FW_FILE_ISP24XX);
8378 MODULE_FIRMWARE(FW_FILE_ISP25XX);
8379