/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

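/*
 * Register-access shorthands used throughout this file:
 *
 *   ISPREG(vha)   - the mapped ISP24xx register block for this host
 *   IOBAR(reg)    - byte offset of the iobase_addr (I/O window select)
 *                   register within that block
 *   IOBASE(vha)   - IOBAR() applied to ISPREG()
 *   INVALID_ENTRY - sentinel returned by an entry handler to abort the
 *                   template walk (e.g. when a RAM dump fails)
 */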
#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha)	IOBAR(ISPREG(vha))
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)

/* hardware_lock assumed held. */
static void
qla27xx_write_remote_reg(struct scsi_qla_host *vha,
			 u32 addr, u32 data)
{
	char *reg = (char *)ISPREG(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd300,
	       "%s: addr/data = %xh/%xh\n", __func__, addr, data);

	WRT_REG_DWORD(reg + IOBASE(vha), 0x40);
	WRT_REG_DWORD(reg + 0xc4, data);
	WRT_REG_DWORD(reg + 0xc0, addr);
}

void
qla27xx_reset_mpi(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
	       "Entered %s.\n", __func__);

	qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
	qla27xx_write_remote_reg(vha, 0x10405c, 0x4);

	vha->hw->stat.num_mpi_reset++;
}

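/*
 * Dump assembly helpers.  The helpers below support the two-pass use of
 * the template walk: a sizing pass with buf == NULL, where only lengths
 * are accumulated, and a capture pass with a real dump buffer, where
 * data is also stored at the current *len offset and hardware registers
 * are actually touched.
 */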
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	if (buf) {
		void __iomem *window = (void __iomem *)reg + offset;

		WRT_REG_DWORD(window, data);
	}
}

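/*
 * Windowed register read: select the window by writing addr to the
 * iobase_addr register, then read count values of the given byte width
 * starting at pci_offset, recording each value after its (incrementing)
 * window address.
 */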
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + le32_to_cpu(ent->hdr.size);
}

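/*
 * Template entry handlers.  Each qla27xx_fwdt_entry_tNNN() consumes one
 * template entry of the matching type, optionally appends data to the
 * dump, and returns a pointer to the next entry.  Returning NULL ends
 * the walk (template end entry); returning INVALID_ENTRY aborts it.
 */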
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t256.base_addr);
	uint offset = ent->t256.pci_offset;
	ulong count = le16_to_cpu(ent->t256.reg_count);
	uint width = ent->t256.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t257.base_addr);
	uint offset = ent->t257.pci_offset;
	ulong data = le32_to_cpu(ent->t257.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint banksel = ent->t258.banksel_offset;
	ulong bank = le32_to_cpu(ent->t258.bank);
	ulong addr = le32_to_cpu(ent->t258.base_addr);
	uint offset = ent->t258.pci_offset;
	uint count = le16_to_cpu(ent->t258.reg_count);
	uint width = ent->t258.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t259.base_addr);
	uint banksel = ent->t259.banksel_offset;
	ulong bank = le32_to_cpu(ent->t259.bank);
	uint offset = ent->t259.pci_offset;
	ulong data = le32_to_cpu(ent->t259.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t260.pci_offset;

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(offset, buf, len);
	qla27xx_read_reg(ISPREG(vha), offset, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t261.pci_offset;
	ulong data = le32_to_cpu(ent->t261.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

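/*
 * Type 262: dump a firmware RAM region via qla24xx_dump_ram().  The
 * critical and miscellaneous areas use the address range supplied by
 * the template; for external, shared and DDR RAM the range (or its end,
 * for external RAM) is taken from the limits recorded in qla_hw_data
 * and written back into the captured entry.  A failed read aborts the
 * walk by returning INVALID_ENTRY.
 */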
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint area = ent->t262.ram_area;
	ulong start = le32_to_cpu(ent->t262.start_addr);
	ulong end = le32_to_cpu(ent->t262.end_addr);
	ulong dwords;
	int rc;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);

	if (area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = cpu_to_le32(end);
	} else if (area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%lx end=%lx)\n",
		    __func__, start, end);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
			    __func__, area, start, end);
			return INVALID_ENTRY;
		}
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

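/*
 * Type 263: capture I/O queues.  For the requested queue type every
 * existing request, response or ATIO ring is emitted as a small record:
 * queue id, entry count, then the raw ring contents.  The number of
 * queues captured is written back into the captured entry.
 */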
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = ent->t263.queue_type;
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
	if (type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(ISPREG(vha), vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t267.pci_offset;
	ulong data = le32_to_cpu(ent->t267.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

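/*
 * Type 268: capture a host-resident buffer.  Depending on buf_type this
 * is the extended firmware trace (EFT), the exchange offload buffer or
 * the extended login buffer; the entry is updated with the buffer size
 * and DMA address before the contents are copied into the dump.
 */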
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the driver;
		 * shadow pointers are used instead.  Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

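/*
 * Types 270/271: remote (e.g. MPI) register access.  I/O window 0x40 is
 * selected first; the registers at offsets 0xc0 and 0xc4 then appear to
 * act as the address and data ports of the indirect interface, the same
 * pair used by qla27xx_write_remote_reg() above.
 */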
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t270.addr);
	ulong dwords = le32_to_cpu(ent->t270.count);

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t271.addr);
	ulong data = le32_to_cpu(ent->t271.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t272.count);
	ulong start = le32_to_cpu(ent->t272.addr);

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t273.count);
	ulong addr = le32_to_cpu(ent->t273.addr);
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = ent->t274.queue_type;
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %lx\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);
	ulong length = le32_to_cpu(ent->t275.length);
	ulong size = le32_to_cpu(ent->hdr.size);
	void *buffer = ent->t275.buffer;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
	if (!length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + length > size) {
		length = size - offset;
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
		ent->t275.length = cpu_to_le32(length);
	}

	qla27xx_insertbuf(buffer, length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

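/*
 * Type 276: conditional entry.  The device type (derived from the PCI
 * device id) and port number are compared against cond1/cond2; on a
 * mismatch the following entry is marked as skipped and the effective
 * entry count in the captured template is reduced by one.
 */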
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (buf) {
		ulong cond1 = le32_to_cpu(ent->t276.cond1);
		ulong cond2 = le32_to_cpu(ent->t276.cond2);
		uint type = vha->hw->pdev->device >> 4 & 0xf;
		uint func = vha->hw->port_no & 0x3;

		if (type != cond1 || func != cond2) {
			struct qla27xx_fwdt_template *tmp = buf;

			tmp->count--;
			ent = qla27xx_next_entry(ent);
			qla27xx_skip_entry(ent, buf);
		}
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t277.data_addr);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(wr_cmd_data, buf, len);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
	qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
	ulong wr_data = le32_to_cpu(ent->t278.wr_data);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = le32_to_cpu(ent->hdr.type);

	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: other %lx [%lx]\n", __func__, type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

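/*
 * Entry-type dispatch table.  qla27xx_find_entry() below walks this
 * list linearly until it reaches a type that is not smaller than the
 * one requested, so the table must stay sorted by ascending type and
 * end with the catch-all { -1, qla27xx_fwdt_entry_other } terminator.
 */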
static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP,		qla27xx_fwdt_entry_t0    },
	{ ENTRY_TYPE_TMP_END,		qla27xx_fwdt_entry_t255  },
	{ ENTRY_TYPE_RD_IOB_T1,		qla27xx_fwdt_entry_t256  },
	{ ENTRY_TYPE_WR_IOB_T1,		qla27xx_fwdt_entry_t257  },
	{ ENTRY_TYPE_RD_IOB_T2,		qla27xx_fwdt_entry_t258  },
	{ ENTRY_TYPE_WR_IOB_T2,		qla27xx_fwdt_entry_t259  },
	{ ENTRY_TYPE_RD_PCI,		qla27xx_fwdt_entry_t260  },
	{ ENTRY_TYPE_WR_PCI,		qla27xx_fwdt_entry_t261  },
	{ ENTRY_TYPE_RD_RAM,		qla27xx_fwdt_entry_t262  },
	{ ENTRY_TYPE_GET_QUEUE,		qla27xx_fwdt_entry_t263  },
	{ ENTRY_TYPE_GET_FCE,		qla27xx_fwdt_entry_t264  },
	{ ENTRY_TYPE_PSE_RISC,		qla27xx_fwdt_entry_t265  },
	{ ENTRY_TYPE_RST_RISC,		qla27xx_fwdt_entry_t266  },
	{ ENTRY_TYPE_DIS_INTR,		qla27xx_fwdt_entry_t267  },
	{ ENTRY_TYPE_GET_HBUF,		qla27xx_fwdt_entry_t268  },
	{ ENTRY_TYPE_SCRATCH,		qla27xx_fwdt_entry_t269  },
	{ ENTRY_TYPE_RDREMREG,		qla27xx_fwdt_entry_t270  },
	{ ENTRY_TYPE_WRREMREG,		qla27xx_fwdt_entry_t271  },
	{ ENTRY_TYPE_RDREMRAM,		qla27xx_fwdt_entry_t272  },
	{ ENTRY_TYPE_PCICFG,		qla27xx_fwdt_entry_t273  },
	{ ENTRY_TYPE_GET_SHADOW,	qla27xx_fwdt_entry_t274  },
	{ ENTRY_TYPE_WRITE_BUF,		qla27xx_fwdt_entry_t275  },
	{ ENTRY_TYPE_CONDITIONAL,	qla27xx_fwdt_entry_t276  },
	{ ENTRY_TYPE_RDPEPREG,		qla27xx_fwdt_entry_t277  },
	{ ENTRY_TYPE_WRPEPREG,		qla27xx_fwdt_entry_t278  },
	{ -1,				qla27xx_fwdt_entry_other }
};

static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

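/*
 * Walk a firmware dump template: dispatch each entry to its handler
 * until the end entry, an aborted entry (INVALID_ENTRY) or the entry
 * count is exhausted.  With buf == NULL this only computes the dump
 * length; with a buffer it also captures the data in place.
 */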
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp +
	    le32_to_cpu(tmp->entry_offset);
	ulong type;

	tmp->count = le32_to_cpu(tmp->entry_count);
	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %u\n", __func__, tmp->count);
	while (ent && tmp->count--) {
		type = le32_to_cpu(ent->hdr.type);
		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
		if (!ent)
			break;

		if (ent == INVALID_ENTRY) {
			*len = 0;
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Unable to capture FW dump");
			goto bailout;
		}
	}

	if (tmp->count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry count residual=+%u\n", __func__, tmp->count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry\n", __func__);

bailout:
	cpu_to_le32s(&tmp->count);	/* endianize residual count */
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = cpu_to_le32(jiffies);
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	WARN_ON_ONCE(sscanf(qla2x00_version_str,
			    "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
			    v+0, v+1, v+2, v+3, v+4, v+5) != 6);

	tmp->driver_info[0] = cpu_to_le32(
		v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
	tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]);
	tmp->driver_info[2] = cpu_to_le32(0x12345678);
}

static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_template *tmp)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] = cpu_to_le32(
		vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
	tmp->firmware_version[4] = cpu_to_le32(
	  vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]);
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}

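/*
 * Template checksum: sum every 32-bit little-endian word of the
 * template, fold the carry back in once, and return the complement.
 * qla27xx_verify_template_checksum() accepts the template when this
 * evaluates to 0, i.e. when the folded sum of all words (including the
 * stored checksum field) is 0xffffffff.
 */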
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	__le32 *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	for ( ; size--; buf++)
		sum += le32_to_cpu(*buf);

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}

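/*
 * Produce a firmware dump from a template: validate it, copy it to the
 * head of the dump buffer, stamp it with time/driver/firmware info and
 * then walk it so the captured data is appended after the template
 * copy.  Returns the total dump length, or 0 if the template is bad.
 */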
static ulong
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_template *tmp, void *buf)
{
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(buf, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, buf, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
	struct qla27xx_fwdt_template *tmp = p;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__,
		    le32_to_cpu(tmp->template_type));
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

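/*
 * Capture an MPI firmware dump using the second dump template, fwdt[1],
 * into the preallocated mpi_fw_dump buffer.  Unless a complete dump of
 * the expected size was captured the MPI is reset afterwards so it can
 * recover; any dump that was produced is reported to user space via a
 * firmware-dump uevent.
 */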
void
qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;
	bool need_mpi_reset = true;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif
	if (!vha->hw->mpi_fw_dump) {
		ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
	} else if (vha->hw->mpi_fw_dumped) {
		ql_log(ql_log_warn, vha, 0x02f4,
		       "-> MPI firmware already dumped (%p) -- ignoring request\n",
		       vha->hw->mpi_fw_dump);
	} else {
		struct fwdt *fwdt = &vha->hw->fwdt[1];
		ulong len;
		void *buf = vha->hw->mpi_fw_dump;

		ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x02f6,
			       "-> fwdt1 no template\n");
			goto bailout;
		}
		len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
		if (len == 0) {
			goto bailout;
		} else if (len != fwdt->dump_size) {
			ql_log(ql_log_warn, vha, 0x02f7,
			       "-> fwdt1 fwdump residual=%+ld\n",
			       fwdt->dump_size - len);
		} else {
			need_mpi_reset = false;
		}

		vha->hw->mpi_fw_dump_len = len;
		vha->hw->mpi_fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0x02f8,
		       "-> MPI firmware dump saved to buffer (%lu/%p)\n",
		       vha->host_no, vha->hw->mpi_fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

bailout:
	if (need_mpi_reset)
		qla27xx_reset_mpi(vha);
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}

void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
	} else if (vha->hw->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd01f,
		    "-> Firmware already dumped (%p) -- ignoring request\n",
		    vha->hw->fw_dump);
	} else {
		struct fwdt *fwdt = vha->hw->fwdt;
		ulong len;
		void *buf = vha->hw->fw_dump;

		ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0xd012,
			       "-> fwdt0 no template\n");
			goto bailout;
		}
		len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
		if (len == 0) {
			goto bailout;
		} else if (len != fwdt->dump_size) {
			ql_log(ql_log_warn, vha, 0xd013,
			       "-> fwdt0 fwdump residual=%+ld\n",
				fwdt->dump_size - len);
		}

		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0xd015,
		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

bailout:
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}