/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static int qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
	uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
		uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

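/*
 * Name: qla_stop_pegs
 * Function: Debug helper; writes 1 to the CRB PEG registers
 *	(Q8_CRB_PEG_0 .. Q8_CRB_PEG_4) to halt the firmware PEG cores.
 */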
static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

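/*
 * Name: qla_sysctl_stop_pegs
 * Function: Sysctl handler; writing 1 halts the PEG cores (debug only).
 */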
static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			qla_stop_pegs(ha);
			QLA_UNLOCK(ha, __func__);
		}
	}

	return (err);
}
#endif /* #ifdef QL_DBG */

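/*
 * Name: qla_validate_set_port_cfg_bit
 * Function: Validates the DCBX enable, pause type and standard pause
 *	direction fields of a port_cfg sysctl value.
 */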
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

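/*
 * Name: qla_sysctl_port_cfg
 * Function: Sysctl handler; sets the port configuration (DCBX and pause
 *	bits) when the written value is valid, otherwise reads it back.
 */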
static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

	ha = (qla_host_t *)arg1;

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                	err = qla_set_port_config(ha, cfg_bits);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}
        } else {
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                	err = qla_get_port_config(ha, &cfg_bits);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

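/*
 * Name: qla_sysctl_set_cam_search_mode
 * Function: Sysctl handler; programs the CAM search mode (internal or
 *	auto).
 */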
static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qla_host_t *)arg1;

	if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
		(ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			err = qla_set_cam_search_mode(ha, (uint32_t)ret);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}

	} else {
		device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
	}

	return (err);
}

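/*
 * Name: qla_sysctl_get_cam_search_mode
 * Function: Sysctl handler; reads the current CAM search mode.
 */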
static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qla_host_t *)arg1;
	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
		err = qla_get_cam_search_mode(ha);
		QLA_UNLOCK(ha, __func__);
	} else {
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	}

	return (err);
}

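/*
 * Name: qlnx_add_hw_mac_stats_sysctls
 * Function: Registers read-only sysctls under "stats_hw_mac" for the MAC
 *	statistics kept in ha->hw.mac.
 */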
static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

	return;
}

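/*
 * Name: qlnx_add_hw_rcv_stats_sysctls
 * Function: Registers read-only sysctls under "stats_hw_rcv" for the
 *	receive statistics kept in ha->hw.rcv.
 */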
static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

	return;
}

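/*
 * Name: qlnx_add_hw_xmt_stats_sysctls
 * Function: Registers a read-only sysctl node per Tx ring under
 *	"stats_hw_xmt" for the transmit statistics kept in ha->hw.xmt[].
 */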
static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
	}

	return;
}

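/*
 * Name: qlnx_add_hw_mbx_cmpl_stats_sysctls
 * Function: Registers read-only sysctls for the mailbox completion time
 *	histogram kept in ha->hw.mbx_comp_msecs[] (200ms buckets plus the
 *	host/firmware mailbox control timeout counters).
 */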
static void
qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *node_children;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_lt_200ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0],
		"mbx_completion_time_lt_200ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_200ms_400ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1],
		"mbx_completion_time_200ms_400ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_400ms_600ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2],
		"mbx_completion_time_400ms_600ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_600ms_800ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3],
		"mbx_completion_time_600ms_800ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_800ms_1000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4],
		"mbx_completion_time_800ms_1000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1000ms_1200ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5],
		"mbx_completion_time_1000ms_1200ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1200ms_1400ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6],
		"mbx_completion_time_1200ms_1400ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1400ms_1600ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7],
		"mbx_completion_time_1400ms_1600ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1600ms_1800ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8],
		"mbx_completion_time_1600ms_1800ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1800ms_2000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9],
		"mbx_completion_time_1800ms_2000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2000ms_2200ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10],
		"mbx_completion_time_2000ms_2200ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2200ms_2400ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11],
		"mbx_completion_time_2200ms_2400ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2400ms_2600ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12],
		"mbx_completion_time_2400ms_2600ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2600ms_2800ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13],
		"mbx_completion_time_2600ms_2800ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2800ms_3000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14],
		"mbx_completion_time_2800ms_3000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_3000ms_4000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15],
		"mbx_completion_time_3000ms_4000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_4000ms_5000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16],
		"mbx_completion_time_4000ms_5000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17],
		"mbx_completion_host_mbx_cntrl_timeout");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18],
		"mbx_completion_fw_mbx_cntrl_timeout");
	return;
}

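/*
 * Name: qlnx_add_hw_stats_sysctls
 * Function: Registers all hardware statistics sysctls.
 */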
static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
	qlnx_add_hw_mac_stats_sysctls(ha);
	qlnx_add_hw_rcv_stats_sysctls(ha);
	qlnx_add_hw_xmt_stats_sysctls(ha);
	qlnx_add_hw_mbx_cmpl_stats_sysctls(ha);

	return;
}

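/*
 * Name: qlnx_add_drvr_sds_stats
 * Function: Registers a read-only sysctl node per status descriptor ring
 *	under "stats_drvr_sds" (interrupt count and rx_free count).
 */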
static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
			ha->hw.sds[i].rx_free, "rx_free");
	}

	return;
}
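
/*
 * Name: qlnx_add_drvr_rds_stats
 * Function: Registers a read-only sysctl node per receive descriptor ring
 *	under "stats_drvr_rds" (descriptor and LRO counters).
 */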
static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
	}

	return;
}

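/*
 * Name: qlnx_add_drvr_tx_stats
 * Function: Registers a read-only sysctl node per Tx ring under
 *	"stats_drvr_xmt" (descriptor count and, when iSCSI TLV support is
 *	compiled in, the iSCSI packet count).
 */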
static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	}

	return;
}

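/*
 * Name: qlnx_add_drvr_stats_sysctls
 * Function: Registers all driver statistics sysctls.
 */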
static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
	qlnx_add_drvr_sds_stats(ha);
	qlnx_add_drvr_rds_stats(ha);
	qlnx_add_drvr_tx_stats(ha);
	return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t	dev;

        dev = ha->pci_dev;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
		ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
		ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

	ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
		ha->hw.sds_cidx_thres,
		"Number of SDS entries to process before updating"
		" SDS Ring Consumer Index");

	ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
		ha->hw.rds_pidx_thres,
		"Number of Rcv Rings Entries to post before updating"
		" RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max microseconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max microseconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "port_cfg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)ha, 0, qla_sysctl_port_cfg, "I",
	    "Set Port Configuration if the value is valid; "
	    "otherwise Get Port Configuration\n"
	    "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
	    "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
	    "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
	    " 1 = xmt only; 2 = rcv only;\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)ha, 0, qla_sysctl_set_cam_search_mode, "I",
	    "Set CAM Search Mode\n"
	    "\t 1 = search mode internal\n"
	    "\t 2 = search mode auto\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"get_cam_search_mode",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
		qla_sysctl_get_cam_search_mode, "I",
		"Get CAM Search Mode\n"
		"\t 1 = search mode internal\n"
		"\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
		"\t 1 : Hardware LRO if LRO is enabled\n"
		"\t 0 : Software LRO if LRO is enabled\n"
		"\t Any change requires ifconfig down/up to take effect\n"
		"\t Note that LRO may be turned off/on via ifconfig\n");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index,
                ha->hw.sp_log_index, "sp_log_index");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop,
                ha->hw.sp_log_stop, "sp_log_stop");

        ha->hw.sp_log_stop_events = 0;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop_events", CTLFLAG_RW,
		&ha->hw.sp_log_stop_events,
                ha->hw.sp_log_stop_events, "Slow path event log is stopped"
		" when any of the following events occur\n"
		"\t 0x01 : Heart beat Failure\n"
		"\t 0x02 : Temperature Failure\n"
		"\t 0x04 : HW Initialization Failure\n"
		"\t 0x08 : Interface Initialization Failure\n"
		"\t 0x10 : Error Recovery Failure\n");

	ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
		ha->hw.mdump_active,
		"Minidump retrieval is Active");

	ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
		&ha->hw.mdump_done, ha->hw.mdump_done,
		"Minidump has been done and is available for retrieval");

	ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
		"Minidump capture mask");
#ifdef QL_DBG

	ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
		"\t\t\t 11: m_getcl or m_getjcl failure\n"
		"\t\t\t 13: Invalid Descriptor Count in SGL Receive\n"
		"\t\t\t 14: Invalid Descriptor Count in LRO Receive\n"
		"\t\t\t 15: peer port error recovery failure\n"
		"\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "peg_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)ha, 0, qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

	qlnx_add_hw_stats_sysctls(ha);
	qlnx_add_drvr_stats_sysctls(ha);

	return;
}

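/*
 * Name: ql_hw_link_status
 * Function: Prints link state, duplex, autonegotiation, link speed and
 *	SFP/module type on the console.
 */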
void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "Link Up\n");
	} else {
		device_printf(ha->pci_dev, "Link Down\n");
	}

	if (ha->hw.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	switch (ha->hw.link_speed) {
	case 0x710:
		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
		break;

	case 0x3E8:
		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
		break;

	case 0x64:
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	switch (ha->hw.module_type) {
	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper (Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper (Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			" (Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

	if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
		ha->hw.dma_buf.flags.sds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
		ha->hw.dma_buf.flags.rds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
		ha->hw.dma_buf.flags.tx_ring = 0;
	}
	ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
	uint32_t		i, j, size, tx_ring_size;
	qla_hw_t		*hw;
	qla_hw_tx_cntxt_t	*tx_cntxt;
	uint8_t			*vaddr;
	bus_addr_t		paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

        ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < hw->num_rds_rings; i++) {
		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return (0);

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return (-1);
}

#define Q8_MBX_MSEC_DELAY	5000

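/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware and reads back the
 *	response. no_pause selects busy-wait delays instead of sleeping.
 *	The completion time is bucketed into ha->hw.mbx_comp_msecs[].
 */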
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	uint32_t *fw_mbox_base = fw_mbox; /* fw_mbox is advanced below */
	int ret = 0;
	uint64_t start_usecs;
	uint64_t end_usecs;
	uint64_t msecs_200;

	ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]);

	if (ha->offline || ha->qla_initiate_recovery) {
		ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0);
		goto exit_qla_mbx_cmd;
	}

	if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) &&
		(((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16)) ||
		!(ha->err_inject & ~0xFFFF))) {
		ret = -3;
		QL_INITIATE_RECOVERY(ha);
		goto exit_qla_mbx_cmd;
	}

	start_usecs = qla_get_usec_timestamp();

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	while (i) {
		if (ha->qla_initiate_recovery) {
			ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
			return (-1);
		}

		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0);
		ret = -1;
		ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++;
		QL_INITIATE_RECOVERY(ha);
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

	i = Q8_MBX_MSEC_DELAY;
	while (i) {
		if (ha->qla_initiate_recovery) {
			ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
			return (-1);
		}

		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0);
		ret = -2;
		ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++;
		QL_INITIATE_RECOVERY(ha);
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_fwmbox; i++) {
		if (ha->qla_initiate_recovery) {
			ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
			return (-1);
		}

		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

	end_usecs = qla_get_usec_timestamp();

	if (end_usecs > start_usecs) {
		msecs_200 = (end_usecs - start_usecs) / (1000 * 200);

		if (msecs_200 < 15)
			ha->hw.mbx_comp_msecs[msecs_200]++;
		else if (msecs_200 < 20)
			ha->hw.mbx_comp_msecs[15]++;
		else {
			device_printf(ha->pci_dev, "%s: [%ju, %ju] %ju\n",
				__func__, (uintmax_t)start_usecs,
				(uintmax_t)end_usecs, (uintmax_t)msecs_200);
			ha->hw.mbx_comp_msecs[16]++;
		}
	}
	/* fw_mbox was advanced in the copy loop above; log from its base */
	ql_sp_log(ha, 7, 5, fw_mbox_base[0], fw_mbox_base[1], fw_mbox_base[2],
	    fw_mbox_base[3], fw_mbox_base[4]);

exit_qla_mbx_cmd:
	return (ret);
}

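/*
 * Name: qla_get_nic_partition
 * Function: Queries the NIC partition attributes: whether 9KB receive
 *	buffers are supported and the number of receive queues.
 */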
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
	uint32_t *num_rcvq)
{
	uint32_t *mbox, err;
	device_t dev = ha->pci_dev;

	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

	mbox = ha->hw.mbox;

	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	err = mbox[0] >> 25;

	if (supports_9kb != NULL) {
		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
			*supports_9kb = 1;
		else
			*supports_9kb = 0;
	}

	if (num_rcvq != NULL)
		*num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

	if ((err != 1) && (err != 0)) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return (0);
}

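/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes MSI-X interrupt contexts in the firmware
 *	and records the returned interrupt ids and sources.
 */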
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
	uint32_t create)
{
	uint32_t i, err;
	device_t dev = ha->pci_dev;
	q80_config_intr_t *c_intr;
	q80_config_intr_rsp_t *c_intr_rsp;

	c_intr = (q80_config_intr_t *)ha->hw.mbox;
	bzero(c_intr, (sizeof (q80_config_intr_t)));

	c_intr->opcode = Q8_MBX_CONFIG_INTR;

	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
	c_intr->count_version |= Q8_MBX_CMD_VERSION;

	c_intr->nentries = num_intrs;

	for (i = 0; i < num_intrs; i++) {
		if (create) {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
			c_intr->intr[i].msix_index = start_idx + 1 + i;
		} else {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
			c_intr->intr[i].msix_index =
				ha->hw.intr_id[(start_idx + i)];
		}

		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
		(sizeof (q80_config_intr_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: %s failed0\n", __func__,
			(create ? "create" : "delete"));
		return (-1);
	}

	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__,
			(create ? "create" : "delete"), err, c_intr_rsp->nentries);

		for (i = 0; i < c_intr_rsp->nentries; i++) {
			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
				__func__, i,
				c_intr_rsp->intr[i].status,
				c_intr_rsp->intr[i].intr_id,
				c_intr_rsp->intr[i].intr_src);
		}

		return (-1);
	}

	for (i = 0; ((i < num_intrs) && create); i++) {
		if (!c_intr_rsp->intr[i].status) {
			ha->hw.intr_id[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_id;
			ha->hw.intr_src[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_src;
		}
	}

	return (0);
}

1627 /*
1628  * Name: qla_config_rss
1629  * Function: Configure RSS for the context/interface.
1630  */
1631 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1632 			0x8030f20c77cb2da3ULL,
1633 			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1634 			0x255b0ec26d5a56daULL };
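/*
 * This appears to be the de-facto standard 40-byte (5 x 64-bit) Toeplitz
 * hash key, stored as little-endian quadwords; it is copied verbatim into
 * the CONFIG_RSS mailbox request below.
 */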
1635 
1636 static int
1637 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1638 {
1639 	q80_config_rss_t	*c_rss;
1640 	q80_config_rss_rsp_t	*c_rss_rsp;
1641 	uint32_t		err, i;
1642 	device_t		dev = ha->pci_dev;
1643 
1644 	c_rss = (q80_config_rss_t *)ha->hw.mbox;
1645 	bzero(c_rss, (sizeof (q80_config_rss_t)));
1646 
1647 	c_rss->opcode = Q8_MBX_CONFIG_RSS;
1648 
1649 	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1650 	c_rss->count_version |= Q8_MBX_CMD_VERSION;
1651 
1652 	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1653 				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1654 	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1655 	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1656 
1657 	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1658 	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1659 
1660 	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1661 
1662 	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1663 	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1664 
1665 	c_rss->cntxt_id = cntxt_id;
1666 
1667 	for (i = 0; i < 5; i++) {
1668 		c_rss->rss_key[i] = rss_key[i];
1669 	}
1670 
1671 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1672 		(sizeof (q80_config_rss_t) >> 2),
1673 		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1674 		device_printf(dev, "%s: failed0\n", __func__);
1675 		return (-1);
1676 	}
1677 	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1678 
1679 	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1680 
1681 	if (err) {
1682 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1683 		return (-1);
1684 	}
1685 	return 0;
1686 }
1687 
1688 static int
1689 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1690         uint16_t cntxt_id, uint8_t *ind_table)
1691 {
1692         q80_config_rss_ind_table_t      *c_rss_ind;
1693         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1694         uint32_t                        err;
1695         device_t                        dev = ha->pci_dev;
1696 
1697 	if ((count > Q8_RSS_IND_TBL_SIZE) ||
1698 		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1699 		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1700 			start_idx, count);
1701 		return (-1);
1702 	}
1703 
1704         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1705         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1706 
1707         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1708         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1709         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1710 
1711 	c_rss_ind->start_idx = start_idx;
1712 	c_rss_ind->end_idx = start_idx + count - 1;
1713 	c_rss_ind->cntxt_id = cntxt_id;
1714 	bcopy(ind_table, c_rss_ind->ind_table, count);
1715 
1716 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1717 		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1718 		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1719 		device_printf(dev, "%s: failed0\n", __func__);
1720 		return (-1);
1721 	}
1722 
1723 	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1724 	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1725 
1726 	if (err) {
1727 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1728 		return (-1);
1729 	}
1730 	return 0;
1731 }
1732 
1733 /*
1734  * Name: qla_config_intr_coalesce
1735  * Function: Configure Interrupt Coalescing.
1736  */
1737 static int
1738 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1739 	int rcv)
1740 {
1741 	q80_config_intr_coalesc_t	*intrc;
1742 	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
1743 	uint32_t			err, i;
1744 	device_t			dev = ha->pci_dev;
1745 
1746 	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1747 	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1748 
1749 	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1750 	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1751 	intrc->count_version |= Q8_MBX_CMD_VERSION;
1752 
1753 	if (rcv) {
1754 		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1755 		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1756 		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1757 	} else {
1758 		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1759 		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1760 		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1761 	}
1762 
1763 	intrc->cntxt_id = cntxt_id;
1764 
1765 	if (tenable) {
1766 		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1767 		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1768 
1769 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
1770 			intrc->sds_ring_mask |= (1 << i);
1771 		}
1772 		intrc->ms_timeout = 1000;
1773 	}
1774 
1775 	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1776 		(sizeof (q80_config_intr_coalesc_t) >> 2),
1777 		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1778 		device_printf(dev, "%s: failed0\n", __func__);
1779 		return (-1);
1780 	}
1781 	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1782 
1783 	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1784 
1785 	if (err) {
1786 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1787 		return (-1);
1788 	}
1789 
1790 	return 0;
1791 }
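/*
 * Worked example (illustrative value): rcv_intr_coalesce = 0x00020080
 * yields max_pkts = 0x0080 (128) from the low 16 bits and max_mswait =
 * 0x0002 from the high 16 bits, i.e. interrupt after 128 packets or two
 * wait intervals, whichever occurs first.
 */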
1792 
1793 
1794 /*
1795  * Name: qla_config_mac_addr
1796  * Function: Binds a MAC address to the context/interface.
1797  *	Can be unicast, multicast or broadcast.
1798  */
1799 static int
1800 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1801 	uint32_t num_mac)
1802 {
1803 	q80_config_mac_addr_t		*cmac;
1804 	q80_config_mac_addr_rsp_t	*cmac_rsp;
1805 	uint32_t			err;
1806 	device_t			dev = ha->pci_dev;
1807 	int				i;
1808 	uint8_t				*mac_cpy = mac_addr;
1809 
1810 	if (num_mac > Q8_MAX_MAC_ADDRS) {
1811 		device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1812 			__func__, (add_mac ? "Add" : "Del"), num_mac);
1813 		return (-1);
1814 	}
1815 
1816 	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1817 	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1818 
1819 	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1820 	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1821 	cmac->count_version |= Q8_MBX_CMD_VERSION;
1822 
1823 	if (add_mac)
1824 		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1825 	else
1826 		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1827 
1828 	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1829 
1830 	cmac->nmac_entries = num_mac;
1831 	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1832 
1833 	for (i = 0; i < num_mac; i++) {
1834 		bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
1835 		mac_addr = mac_addr + ETHER_ADDR_LEN;
1836 	}
1837 
1838 	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1839 		(sizeof (q80_config_mac_addr_t) >> 2),
1840 		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1841 		device_printf(dev, "%s: %s failed0\n", __func__,
1842 			(add_mac ? "Add" : "Del"));
1843 		return (-1);
1844 	}
1845 	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1846 
1847 	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1848 
1849 	if (err) {
1850 		device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1851 			(add_mac ? "Add" : "Del"), err);
1852 		for (i = 0; i < num_mac; i++) {
1853 			device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1854 				__func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1855 				mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1856 			mac_cpy += ETHER_ADDR_LEN;
1857 		}
1858 		return (-1);
1859 	}
1860 
1861 	return 0;
1862 }
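/*
 * Usage sketch (hypothetical caller): mac_addr points to a packed array
 * of num_mac six-byte addresses, at most Q8_MAX_MAC_ADDRS per call:
 *
 *	uint8_t addrs[2 * ETHER_ADDR_LEN];
 *
 *	bcopy(addr_a, &addrs[0], ETHER_ADDR_LEN);
 *	bcopy(addr_b, &addrs[ETHER_ADDR_LEN], ETHER_ADDR_LEN);
 *	if (qla_config_mac_addr(ha, addrs, 1, 2))
 *		return;	// add of both addresses failed
 */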
1863 
1864 
1865 /*
1866  * Name: qla_set_mac_rcv_mode
1867  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1868  */
1869 static int
1870 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1871 {
1872 	q80_config_mac_rcv_mode_t	*rcv_mode;
1873 	uint32_t			err;
1874 	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
1875 	device_t			dev = ha->pci_dev;
1876 
1877 	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1878 	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1879 
1880 	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1881 	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1882 	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1883 
1884 	rcv_mode->mode = mode;
1885 
1886 	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1887 
1888 	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1889 		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
1890 		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1891 		device_printf(dev, "%s: failed0\n", __func__);
1892 		return (-1);
1893 	}
1894 	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1895 
1896 	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1897 
1898 	if (err) {
1899 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1900 		return (-1);
1901 	}
1902 
1903 	return 0;
1904 }
1905 
1906 int
1907 ql_set_promisc(qla_host_t *ha)
1908 {
1909 	int ret;
1910 
1911 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1912 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1913 	return (ret);
1914 }
1915 
1916 void
1917 qla_reset_promisc(qla_host_t *ha)
1918 {
1919 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1920 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1921 }
1922 
1923 int
1924 ql_set_allmulti(qla_host_t *ha)
1925 {
1926 	int ret;
1927 
1928 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1929 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1930 	return (ret);
1931 }
1932 
1933 void
1934 qla_reset_allmulti(qla_host_t *ha)
1935 {
1936 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1937 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1938 }
1939 
1940 /*
1941  * Name: ql_set_max_mtu
1942  * Function:
1943  *	Sets the maximum transfer unit size for the specified rcv context.
1944  */
1945 int
1946 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1947 {
1948 	device_t		dev;
1949 	q80_set_max_mtu_t	*max_mtu;
1950 	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
1951 	uint32_t		err;
1952 
1953 	dev = ha->pci_dev;
1954 
1955 	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1956 	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1957 
1958 	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1959 	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1960 	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1961 
1962 	max_mtu->cntxt_id = cntxt_id;
1963 	max_mtu->mtu = mtu;
1964 
1965         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1966 		(sizeof (q80_set_max_mtu_t) >> 2),
1967                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1968                 device_printf(dev, "%s: failed\n", __func__);
1969                 return -1;
1970         }
1971 
1972 	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1973 
1974         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1975 
1976         if (err) {
1977                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1978         }
1979 
1980 	return 0;
1981 }
1982 
1983 static int
1984 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1985 {
1986 	device_t		dev;
1987 	q80_link_event_t	*lnk;
1988 	q80_link_event_rsp_t	*lnk_rsp;
1989 	uint32_t		err;
1990 
1991 	dev = ha->pci_dev;
1992 
1993 	lnk = (q80_link_event_t *)ha->hw.mbox;
1994 	bzero(lnk, (sizeof (q80_link_event_t)));
1995 
1996 	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1997 	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1998 	lnk->count_version |= Q8_MBX_CMD_VERSION;
1999 
2000 	lnk->cntxt_id = cntxt_id;
2001 	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
2002 
2003         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
2004                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
2005                 device_printf(dev, "%s: failed\n", __func__);
2006                 return -1;
2007         }
2008 
2009 	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
2010 
2011         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
2012 
2013         if (err) {
2014                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2015         }
2016 
2017 	return 0;
2018 }
2019 
2020 static int
2021 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
2022 {
2023 	device_t		dev;
2024 	q80_config_fw_lro_t	*fw_lro;
2025 	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
2026 	uint32_t		err;
2027 
2028 	dev = ha->pci_dev;
2029 
2030 	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
2031 	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
2032 
2033 	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
2034 	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
2035 	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
2036 
2037 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
2038 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
2039 
2040 	fw_lro->cntxt_id = cntxt_id;
2041 
2042 	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
2043 		(sizeof (q80_config_fw_lro_t) >> 2),
2044 		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
2045 		device_printf(dev, "%s: failed\n", __func__);
2046 		return -1;
2047 	}
2048 
2049 	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
2050 
2051 	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
2052 
2053 	if (err) {
2054 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2055 	}
2056 
2057 	return 0;
2058 }
2059 
2060 static int
2061 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
2062 {
2063 	device_t                dev;
2064 	q80_hw_config_t         *hw_config;
2065 	q80_hw_config_rsp_t     *hw_config_rsp;
2066 	uint32_t                err;
2067 
2068 	dev = ha->pci_dev;
2069 
2070 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
2071 	bzero(hw_config, sizeof (q80_hw_config_t));
2072 
2073 	hw_config->opcode = Q8_MBX_HW_CONFIG;
2074 	hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
2075 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
2076 
2077 	hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
2078 
2079 	hw_config->u.set_cam_search_mode.mode = search_mode;
2080 
2081 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2082 		(sizeof (q80_hw_config_t) >> 2),
2083 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2084 		device_printf(dev, "%s: failed\n", __func__);
2085 		return -1;
2086 	}
2087 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2088 
2089 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2090 
2091 	if (err) {
2092 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2093 	}
2094 
2095 	return 0;
2096 }
2097 
2098 static int
2099 qla_get_cam_search_mode(qla_host_t *ha)
2100 {
2101 	device_t                dev;
2102 	q80_hw_config_t         *hw_config;
2103 	q80_hw_config_rsp_t     *hw_config_rsp;
2104 	uint32_t                err;
2105 
2106 	dev = ha->pci_dev;
2107 
2108 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
2109 	bzero(hw_config, sizeof (q80_hw_config_t));
2110 
2111 	hw_config->opcode = Q8_MBX_HW_CONFIG;
2112 	hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
2113 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
2114 
2115 	hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
2116 
2117 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2118 		(sizeof (q80_hw_config_t) >> 2),
2119 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2120 		device_printf(dev, "%s: failed\n", __func__);
2121 		return -1;
2122 	}
2123 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2124 
2125 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2126 
2127 	if (err) {
2128 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2129 	} else {
2130 		device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
2131 			hw_config_rsp->u.get_cam_search_mode.mode);
2132 	}
2133 
2134 	return 0;
2135 }
2136 
2137 static int
2138 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
2139 {
2140 	device_t		dev;
2141 	q80_get_stats_t		*stat;
2142 	q80_get_stats_rsp_t	*stat_rsp;
2143 	uint32_t		err;
2144 
2145 	dev = ha->pci_dev;
2146 
2147 	stat = (q80_get_stats_t *)ha->hw.mbox;
2148 	bzero(stat, (sizeof (q80_get_stats_t)));
2149 
2150 	stat->opcode = Q8_MBX_GET_STATS;
2151 	stat->count_version = 2;
2152 	stat->count_version |= Q8_MBX_CMD_VERSION;
2153 
2154 	stat->cmd = cmd;
2155 
2156         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
2157                 ha->hw.mbox, (rsp_size >> 2), 0)) {
2158                 device_printf(dev, "%s: failed\n", __func__);
2159                 return -1;
2160         }
2161 
2162 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2163 
2164         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
2165 
2166         if (err) {
2167                 return -1;
2168         }
2169 
2170 	return 0;
2171 }
2172 
2173 void
2174 ql_get_stats(qla_host_t *ha)
2175 {
2176 	q80_get_stats_rsp_t	*stat_rsp;
2177 	q80_mac_stats_t		*mstat;
2178 	q80_xmt_stats_t		*xstat;
2179 	q80_rcv_stats_t		*rstat;
2180 	uint32_t		cmd;
2181 	int			i;
2182 	struct ifnet *ifp = ha->ifp;
2183 
2184 	if (ifp == NULL)
2185 		return;
2186 
2187 	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2188 		device_printf(ha->pci_dev, "%s: failed\n", __func__);
2189 		return;
2190 	}
2191 
2192 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2193 		QLA_UNLOCK(ha, __func__);
2194 		return;
2195 	}
2196 
2197 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2198 	/*
2199 	 * Get MAC Statistics
2200 	 */
2201 	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2202 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
2203 
2204 	cmd |= ((ha->pci_func & 0x1) << 16);
2205 
2206 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2207 		ha->offline)
2208 		goto ql_get_stats_exit;
2209 
2210 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2211 		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2212 		bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2213 	} else {
2214                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2215 			__func__, ha->hw.mbox[0]);
2216 	}
2217 	/*
2218 	 * Get RCV Statistics
2219 	 */
2220 	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2221 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
2222 	cmd |= (ha->hw.rcv_cntxt_id << 16);
2223 
2224 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2225 		ha->offline)
2226 		goto ql_get_stats_exit;
2227 
2228 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2229 		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2230 		bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2231 	} else {
2232                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2233 			__func__, ha->hw.mbox[0]);
2234 	}
2235 
2236 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2237 		ha->offline)
2238 		goto ql_get_stats_exit;
2239 	/*
2240 	 * Get XMT Statistics
2241 	 */
2242 	for (i = 0; (i < ha->hw.num_tx_rings); i++) {
2243 		if (ha->qla_watchdog_pause ||
2244 			(!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2245 			ha->offline)
2246 			goto ql_get_stats_exit;
2247 
2248 		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2249 //		cmd |= Q8_GET_STATS_CMD_CLEAR;
2250 		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2251 
2252 		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2253 			== 0) {
2254 			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2255 			bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2256 		} else {
2257 			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2258 				__func__, ha->hw.mbox[0]);
2259 		}
2260 	}
2261 
2262 ql_get_stats_exit:
2263 	QLA_UNLOCK(ha, __func__);
2264 
2265 	return;
2266 }
2267 
2268 /*
2269  * Name: qla_tx_tso
2270  * Function: Checks if the packet to be transmitted is a candidate for
2271  *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2272  *	Ring Structure are plugged in.
2273  */
2274 static int
2275 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2276 {
2277 	struct ether_vlan_header *eh;
2278 	struct ip *ip = NULL;
2279 	struct ip6_hdr *ip6 = NULL;
2280 	struct tcphdr *th = NULL;
2281 	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2282 	uint16_t etype, opcode, offload = 1;
2283 	device_t dev;
2284 
2285 	dev = ha->pci_dev;
2286 
2287 
2288 	eh = mtod(mp, struct ether_vlan_header *);
2289 
2290 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2291 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2292 		etype = ntohs(eh->evl_proto);
2293 	} else {
2294 		ehdrlen = ETHER_HDR_LEN;
2295 		etype = ntohs(eh->evl_encap_proto);
2296 	}
2297 
2298 	hdrlen = 0;
2299 
2300 	switch (etype) {
2301 		case ETHERTYPE_IP:
2302 
2303 			tcp_opt_off = ehdrlen + sizeof(struct ip) +
2304 					sizeof(struct tcphdr);
2305 
2306 			if (mp->m_len < tcp_opt_off) {
2307 				m_copydata(mp, 0, tcp_opt_off, hdr);
2308 				ip = (struct ip *)(hdr + ehdrlen);
2309 			} else {
2310 				ip = (struct ip *)(mp->m_data + ehdrlen);
2311 			}
2312 
2313 			ip_hlen = ip->ip_hl << 2;
2314 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2315 
2316 
2317 			if ((ip->ip_p != IPPROTO_TCP) ||
2318 				(ip_hlen != sizeof (struct ip))){
2319 				/* IP Options are not supported */
2320 
2321 				offload = 0;
2322 			} else
2323 				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2324 
2325 		break;
2326 
2327 		case ETHERTYPE_IPV6:
2328 
2329 			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2330 					sizeof (struct tcphdr);
2331 
2332 			if (mp->m_len < tcp_opt_off) {
2333 				m_copydata(mp, 0, tcp_opt_off, hdr);
2334 				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2335 			} else {
2336 				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2337 			}
2338 
2339 			ip_hlen = sizeof(struct ip6_hdr);
2340 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2341 
2342 			if (ip6->ip6_nxt != IPPROTO_TCP) {
2343 				//device_printf(dev, "%s: ipv6\n", __func__);
2344 				offload = 0;
2345 			} else
2346 				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2347 		break;
2348 
2349 		default:
2350 			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2351 			offload = 0;
2352 		break;
2353 	}
2354 
2355 	if (!offload)
2356 		return (-1);
2357 
2358 	tcp_hlen = th->th_off << 2;
2359 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2360 
2361         if (mp->m_len < hdrlen) {
2362                 if (mp->m_len < tcp_opt_off) {
2363                         if (tcp_hlen > sizeof(struct tcphdr)) {
2364                                 m_copydata(mp, tcp_opt_off,
2365                                         (tcp_hlen - sizeof(struct tcphdr)),
2366                                         &hdr[tcp_opt_off]);
2367                         }
2368                 } else {
2369                         m_copydata(mp, 0, hdrlen, hdr);
2370                 }
2371         }
2372 
2373 	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2374 
2375 	tx_cmd->flags_opcode = opcode;
2376 	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2377 	tx_cmd->total_hdr_len = hdrlen;
2378 
2379 	/* Multicast: low-order bit of the first address octet is set */
2380 	if (eh->evl_dhost[0] & 0x01) {
2381 		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2382 	}
2383 
2384 	if (mp->m_len < hdrlen) {
2385 		device_printf(dev, "%s: mp->m_len < hdrlen (%d)\n", __func__, hdrlen);
2386 		return (1);
2387 	}
2388 
2389 	return (0);
2390 }
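/*
 * Worked example (untagged IPv4/TCP, no TCP options): ehdrlen = 14,
 * ip_hlen = 20, tcp_hlen = 20, so tcp_opt_off = hdrlen = 54. If the
 * first mbuf holds fewer than 54 bytes, the complete header is
 * assembled in 'hdr' and 1 is returned, so the caller transmits the
 * header from that copy instead of the mbuf chain.
 */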
2391 
2392 /*
2393  * Name: qla_tx_chksum
2394  * Function: Checks if the packet to be transmitted is a candidate for
2395  *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2396  *	Ring Structure are plugged in.
2397  */
2398 static int
2399 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2400 	uint32_t *tcp_hdr_off)
2401 {
2402 	struct ether_vlan_header *eh;
2403 	struct ip *ip;
2404 	struct ip6_hdr *ip6;
2405 	uint32_t ehdrlen, ip_hlen;
2406 	uint16_t etype, opcode, offload = 1;
2407 	device_t dev;
2408 	uint8_t buf[sizeof(struct ip6_hdr)];
2409 
2410 	dev = ha->pci_dev;
2411 
2412 	*op_code = 0;
2413 
2414 	if ((mp->m_pkthdr.csum_flags &
2415 		(CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2416 		return (-1);
2417 
2418 	eh = mtod(mp, struct ether_vlan_header *);
2419 
2420 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2421 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2422 		etype = ntohs(eh->evl_proto);
2423 	} else {
2424 		ehdrlen = ETHER_HDR_LEN;
2425 		etype = ntohs(eh->evl_encap_proto);
2426 	}
2427 
2428 
2429 	switch (etype) {
2430 		case ETHERTYPE_IP:
2431 			ip = (struct ip *)(mp->m_data + ehdrlen);
2432 
2433 			ip_hlen = sizeof (struct ip);
2434 
2435 			if (mp->m_len < (ehdrlen + ip_hlen)) {
2436 				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2437 				ip = (struct ip *)buf;
2438 			}
2439 
2440 			if (ip->ip_p == IPPROTO_TCP)
2441 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2442 			else if (ip->ip_p == IPPROTO_UDP)
2443 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2444 			else {
2445 				//device_printf(dev, "%s: ipv4\n", __func__);
2446 				offload = 0;
2447 			}
2448 		break;
2449 
2450 		case ETHERTYPE_IPV6:
2451 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2452 
2453 			ip_hlen = sizeof(struct ip6_hdr);
2454 
2455 			if (mp->m_len < (ehdrlen + ip_hlen)) {
2456 				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2457 					buf);
2458 				ip6 = (struct ip6_hdr *)buf;
2459 			}
2460 
2461 			if (ip6->ip6_nxt == IPPROTO_TCP)
2462 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2463 			else if (ip6->ip6_nxt == IPPROTO_UDP)
2464 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2465 			else {
2466 				//device_printf(dev, "%s: ipv6\n", __func__);
2467 				offload = 0;
2468 			}
2469 		break;
2470 
2471 		default:
2472 			offload = 0;
2473 		break;
2474 	}
2475 	if (!offload)
2476 		return (-1);
2477 
2478 	*op_code = opcode;
2479 	*tcp_hdr_off = (ip_hlen + ehdrlen);
2480 
2481 	return (0);
2482 }
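/*
 * Worked example (VLAN-tagged IPv4/TCP): ehdrlen = 18, ip_hlen = 20,
 * so *op_code becomes Q8_TX_CMD_OP_XMT_TCP_CHKSUM and *tcp_hdr_off
 * becomes 38, the offset of the TCP header within the frame.
 */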
2483 
2484 #define QLA_TX_MIN_FREE 2
2485 /*
2486  * Name: ql_hw_send
2487  * Function: Transmits a packet. It first checks if the packet is a
2488  *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2489  *	offload. If either of these creteria are not met, it is transmitted
2490  *	as a regular ethernet frame.
2491  */
2492 int
2493 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2494 	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2495 {
2496 	struct ether_vlan_header *eh;
2497 	qla_hw_t *hw = &ha->hw;
2498 	q80_tx_cmd_t *tx_cmd, tso_cmd;
2499 	bus_dma_segment_t *c_seg;
2500 	uint32_t num_tx_cmds, hdr_len = 0;
2501 	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2502 	device_t dev;
2503 	int i, ret;
2504 	uint8_t *src = NULL, *dst = NULL;
2505 	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2506 	uint32_t op_code = 0;
2507 	uint32_t tcp_hdr_off = 0;
2508 
2509 	dev = ha->pci_dev;
2510 
2511 	/*
2512 	 * Always make sure there is at least one empty slot in the tx_ring;
2513 	 * the tx_ring is considered full when only one entry is available.
2514 	 */
2515         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
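	/*
	 * The ">> 2" above assumes each tx cmd descriptor carries up to
	 * four DMA segments (buf1 through buf4); e.g. nsegs = 5 needs
	 * two descriptors.
	 */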
2516 
2517 	total_length = mp->m_pkthdr.len;
2518 	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2519 		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2520 			__func__, total_length);
2521 		return (EINVAL);
2522 	}
2523 	eh = mtod(mp, struct ether_vlan_header *);
2524 
2525 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2526 
2527 		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2528 
2529 		src = frame_hdr;
2530 		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
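		/*
		 * qla_tx_tso() returns -1 when the frame is not a TSO
		 * candidate, 1 when the header was assembled in frame_hdr,
		 * and 0 when the header is contiguous in the mbuf; the
		 * "!(ret & ~1)" test below accepts 0 and 1.
		 */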
2531 
2532 		if (!(ret & ~1)) {
2533 			/* find the additional tx_cmd descriptors required */
2534 
2535 			if (mp->m_flags & M_VLANTAG)
2536 				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2537 
2538 			hdr_len = tso_cmd.total_hdr_len;
2539 
2540 			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2541 			bytes = QL_MIN(bytes, hdr_len);
2542 
2543 			num_tx_cmds++;
2544 			hdr_len -= bytes;
2545 
2546 			while (hdr_len) {
2547 				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2548 				hdr_len -= bytes;
2549 				num_tx_cmds++;
2550 			}
2551 			hdr_len = tso_cmd.total_hdr_len;
2552 
2553 			if (ret == 0)
2554 				src = (uint8_t *)eh;
2555 		} else
2556 			return (EINVAL);
2557 	} else {
2558 		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2559 	}
2560 
2561 	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2562 		ql_hw_tx_done_locked(ha, txr_idx);
2563 		if (hw->tx_cntxt[txr_idx].txr_free <=
2564 				(num_tx_cmds + QLA_TX_MIN_FREE)) {
2565         		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2566 				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2567 				__func__));
2568 			return (-1);
2569 		}
2570 	}
2571 
2572 	for (i = 0; i < num_tx_cmds; i++) {
2573 		int j;
2574 
2575 		j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
2576 
2577 		if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2578 			QL_ASSERT(ha, 0, \
2579 				("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2580 				__func__, __LINE__, txr_idx, j,\
2581 				ha->tx_ring[txr_idx].tx_buf[j].m_head));
2582 			return (EINVAL);
2583 		}
2584 	}
2585 
2586 	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2587 
2588         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2589 
2590                 if (nsegs > ha->hw.max_tx_segs)
2591                         ha->hw.max_tx_segs = nsegs;
2592 
2593                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2594 
2595                 if (op_code) {
2596                         tx_cmd->flags_opcode = op_code;
2597                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2598 
2599                 } else {
2600                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2601                 }
2602 	} else {
2603 		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2604 		ha->tx_tso_frames++;
2605 	}
2606 
2607 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2608         	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2609 
2610 		if (iscsi_pdu)
2611 			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2612 
2613 	} else if (mp->m_flags & M_VLANTAG) {
2614 
2615 		if (hdr_len) { /* TSO */
2616 			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2617 						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2618 			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2619 		} else
2620 			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2621 
2622 		ha->hw_vlan_tx_frames++;
2623 		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2624 
2625 		if (iscsi_pdu) {
2626 			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2627 			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2628 		}
2629 	}
2630 
2631 
2632         tx_cmd->n_bufs = (uint8_t)nsegs;
2633         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2634         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2635 	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2636 
2637 	c_seg = segs;
2638 
2639 	while (1) {
2640 		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2641 
2642 			switch (i) {
2643 			case 0:
2644 				tx_cmd->buf1_addr = c_seg->ds_addr;
2645 				tx_cmd->buf1_len = c_seg->ds_len;
2646 				break;
2647 
2648 			case 1:
2649 				tx_cmd->buf2_addr = c_seg->ds_addr;
2650 				tx_cmd->buf2_len = c_seg->ds_len;
2651 				break;
2652 
2653 			case 2:
2654 				tx_cmd->buf3_addr = c_seg->ds_addr;
2655 				tx_cmd->buf3_len = c_seg->ds_len;
2656 				break;
2657 
2658 			case 3:
2659 				tx_cmd->buf4_addr = c_seg->ds_addr;
2660 				tx_cmd->buf4_len = c_seg->ds_len;
2661 				break;
2662 			}
2663 
2664 			c_seg++;
2665 			nsegs--;
2666 		}
2667 
2668 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2669 			(hw->tx_cntxt[txr_idx].txr_next + 1) &
2670 				(NUM_TX_DESCRIPTORS - 1);
2671 		tx_cmd_count++;
2672 
2673 		if (!nsegs)
2674 			break;
2675 
2676 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2677 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2678 	}
2679 
2680 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2681 
2682 		/* TSO : Copy the header in the following tx cmd descriptors */
2683 
2684 		txr_next = hw->tx_cntxt[txr_idx].txr_next;
2685 
2686 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2687 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2688 
2689 		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2690 		bytes = QL_MIN(bytes, hdr_len);
2691 
2692 		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2693 
2694 		if (mp->m_flags & M_VLANTAG) {
2695 			/* first copy the src/dst MAC addresses */
2696 			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2697 			dst += (ETHER_ADDR_LEN * 2);
2698 			src += (ETHER_ADDR_LEN * 2);
2699 
2700 			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2701 			dst += 2;
2702 			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2703 			dst += 2;
2704 
2705 			/* bytes left in src header */
2706 			hdr_len -= ((ETHER_ADDR_LEN * 2) +
2707 					ETHER_VLAN_ENCAP_LEN);
2708 
2709 			/* bytes left in TxCmd Entry */
2710 			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2711 
2712 
2713 			bcopy(src, dst, bytes);
2714 			src += bytes;
2715 			hdr_len -= bytes;
2716 		} else {
2717 			bcopy(src, dst, bytes);
2718 			src += bytes;
2719 			hdr_len -= bytes;
2720 		}
2721 
2722 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2723 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2724 					(NUM_TX_DESCRIPTORS - 1);
2725 		tx_cmd_count++;
2726 
2727 		while (hdr_len) {
2728 			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2729 			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2730 
2731 			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2732 
2733 			bcopy(src, tx_cmd, bytes);
2734 			src += bytes;
2735 			hdr_len -= bytes;
2736 
2737 			txr_next = hw->tx_cntxt[txr_idx].txr_next =
2738 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2739 					(NUM_TX_DESCRIPTORS - 1);
2740 			tx_cmd_count++;
2741 		}
2742 	}
2743 
2744 	hw->tx_cntxt[txr_idx].txr_free =
2745 		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2746 
2747 	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2748 		txr_idx);
2749        	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2750 
2751 	return (0);
2752 }
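/*
 * TSO header spill, worked sketch (assuming a 64-byte q80_tx_cmd_t and a
 * 16-byte Q8_TX_CMD_TSO_ALIGN, neither of which is sized in this file):
 * the first continuation descriptor then holds 48 header bytes, so the
 * 54-byte header from the earlier example needs a second continuation
 * descriptor for its remaining 6 bytes; longer headers keep spilling in
 * full sizeof(q80_tx_cmd_t) sized chunks.
 */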
2753 
2756 #define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2757 static int
2758 qla_config_rss_ind_table(qla_host_t *ha)
2759 {
2760 	uint32_t i, count;
2761 	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2762 
2763 
2764 	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2765 		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2766 	}
2767 
2768 	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX;
2769 		i = i + Q8_CONFIG_IND_TBL_SIZE) {
2770 
2771 		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2772 			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2773 		} else {
2774 			count = Q8_CONFIG_IND_TBL_SIZE;
2775 		}
2776 
2777 		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2778 			rss_ind_tbl))
2779 			return (-1);
2780 	}
2781 
2782 	return (0);
2783 }
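/*
 * Worked example: with num_sds_rings = 4 the 32-entry pattern is
 * 0,1,2,3,0,1,2,3,... and it is replayed until Q8_RSS_IND_TBL_MAX_IDX
 * is covered, spreading RSS hash values round-robin across the four
 * status rings.
 */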
2784 
2785 static int
2786 qla_config_soft_lro(qla_host_t *ha)
2787 {
2788         int i;
2789         qla_hw_t *hw = &ha->hw;
2790         struct lro_ctrl *lro;
2791 
2792         for (i = 0; i < hw->num_sds_rings; i++) {
2793                 lro = &hw->sds[i].lro;
2794 
2795 		bzero(lro, sizeof(struct lro_ctrl));
2796 
2797 #if (__FreeBSD_version >= 1100101)
2798                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2799                         device_printf(ha->pci_dev,
2800 				"%s: tcp_lro_init_args [%d] failed\n",
2801                                 __func__, i);
2802                         return (-1);
2803                 }
2804 #else
2805                 if (tcp_lro_init(lro)) {
2806                         device_printf(ha->pci_dev,
2807 				"%s: tcp_lro_init [%d] failed\n",
2808                                 __func__, i);
2809                         return (-1);
2810                 }
2811 #endif /* #if (__FreeBSD_version >= 1100101) */
2812 
2813                 lro->ifp = ha->ifp;
2814         }
2815 
2816         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2817         return (0);
2818 }
2819 
2820 static void
2821 qla_drain_soft_lro(qla_host_t *ha)
2822 {
2823         int i;
2824         qla_hw_t *hw = &ha->hw;
2825         struct lro_ctrl *lro;
2826 
2827        	for (i = 0; i < hw->num_sds_rings; i++) {
2828                	lro = &hw->sds[i].lro;
2829 
2830 #if (__FreeBSD_version >= 1100101)
2831 		tcp_lro_flush_all(lro);
2832 #else
2833                 struct lro_entry *queued;
2834 
2835 		while ((!SLIST_EMPTY(&lro->lro_active))) {
2836 			queued = SLIST_FIRST(&lro->lro_active);
2837 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
2838 			tcp_lro_flush(lro, queued);
2839 		}
2840 #endif /* #if (__FreeBSD_version >= 1100101) */
2841 	}
2842 
2843 	return;
2844 }
2845 
2846 static void
2847 qla_free_soft_lro(qla_host_t *ha)
2848 {
2849         int i;
2850         qla_hw_t *hw = &ha->hw;
2851         struct lro_ctrl *lro;
2852 
2853         for (i = 0; i < hw->num_sds_rings; i++) {
2854                	lro = &hw->sds[i].lro;
2855 		tcp_lro_free(lro);
2856 	}
2857 
2858 	return;
2859 }
2860 
2861 
2862 /*
2863  * Name: ql_del_hw_if
2864  * Function: Destroys the hardware specific entities corresponding to an
2865  *	Ethernet Interface
2866  */
2867 void
2868 ql_del_hw_if(qla_host_t *ha)
2869 {
2870 	uint32_t i;
2871 	uint32_t num_msix;
2872 
2873 	(void)qla_stop_nic_func(ha);
2874 
2875 	qla_del_rcv_cntxt(ha);
2876 
2877 	if(qla_del_xmt_cntxt(ha))
2878 		goto ql_del_hw_if_exit;
2879 
2880 	if (ha->hw.flags.init_intr_cnxt) {
2881 		for (i = 0; i < ha->hw.num_sds_rings; ) {
2882 
2883 			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2884 				num_msix = Q8_MAX_INTR_VECTORS;
2885 			else
2886 				num_msix = ha->hw.num_sds_rings - i;
2887 
2888 			if (qla_config_intr_cntxt(ha, i, num_msix, 0))
2889 				break;
2890 
2891 			i += num_msix;
2892 		}
2893 
2894 		ha->hw.flags.init_intr_cnxt = 0;
2895 	}
2896 
2897 ql_del_hw_if_exit:
2898 	if (ha->hw.enable_soft_lro) {
2899 		qla_drain_soft_lro(ha);
2900 		qla_free_soft_lro(ha);
2901 	}
2902 
2903 	return;
2904 }
2905 
2906 void
2907 qla_confirm_9kb_enable(qla_host_t *ha)
2908 {
2909 //	uint32_t supports_9kb = 0;
2910 
2911 	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2912 
2913 	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2914 	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2915 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2916 
2917 #if 0
2918 	qla_get_nic_partition(ha, &supports_9kb, NULL);
2919 
2920 	if (!supports_9kb)
2921 #endif
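	/*
	 * With the partition query compiled out above, 9KB receive
	 * buffers end up unconditionally disabled here.
	 */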
2922 	ha->hw.enable_9kb = 0;
2923 
2924 	return;
2925 }
2926 
2927 /*
2928  * Name: ql_init_hw_if
2929  * Function: Creates the hardware specific entities corresponding to an
2930  *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2931  *	corresponding to the interface. Enables LRO if allowed.
2932  */
2933 int
2934 ql_init_hw_if(qla_host_t *ha)
2935 {
2936 	device_t	dev;
2937 	uint32_t	i;
2938 	uint8_t		bcast_mac[6];
2939 	qla_rdesc_t	*rdesc;
2940 	uint32_t	num_msix;
2941 
2942 	dev = ha->pci_dev;
2943 
2944 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
2945 		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2946 			ha->hw.dma_buf.sds_ring[i].size);
2947 	}
2948 
2949 	for (i = 0; i < ha->hw.num_sds_rings; ) {
2950 
2951 		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2952 			num_msix = Q8_MAX_INTR_VECTORS;
2953 		else
2954 			num_msix = ha->hw.num_sds_rings - i;
2955 
2956 		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2957 
2958 			if (i > 0) {
2959 
2960 				num_msix = i;
2961 
2962 				for (i = 0; i < num_msix; ) {
2963 					qla_config_intr_cntxt(ha, i,
2964 						Q8_MAX_INTR_VECTORS, 0);
2965 					i += Q8_MAX_INTR_VECTORS;
2966 				}
2967 			}
2968 			return (-1);
2969 		}
2970 
2971 		i = i + num_msix;
2972 	}
2973 
2974         ha->hw.flags.init_intr_cnxt = 1;
2975 
2976 	/*
2977 	 * Create Receive Context
2978 	 */
2979 	if (qla_init_rcv_cntxt(ha)) {
2980 		return (-1);
2981 	}
2982 
2983 	for (i = 0; i < ha->hw.num_rds_rings; i++) {
2984 		rdesc = &ha->hw.rds[i];
2985 		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2986 		rdesc->rx_in = 0;
2987 		/* Update the RDS Producer Indices */
2988 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2989 			rdesc->rx_next);
2990 	}
2991 
2992 	/*
2993 	 * Create Transmit Context
2994 	 */
2995 	if (qla_init_xmt_cntxt(ha)) {
2996 		qla_del_rcv_cntxt(ha);
2997 		return (-1);
2998 	}
2999 	ha->hw.max_tx_segs = 0;
3000 
3001 	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
3002 		return(-1);
3003 
3004 	ha->hw.flags.unicast_mac = 1;
3005 
3006 	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3007 	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3008 
3009 	if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
3010 		return (-1);
3011 
3012 	ha->hw.flags.bcast_mac = 1;
3013 
3014 	/*
3015 	 * program any cached multicast addresses
3016 	 */
3017 	if (qla_hw_add_all_mcast(ha))
3018 		return (-1);
3019 
3020 	if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
3021 		return (-1);
3022 
3023 	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
3024 		return (-1);
3025 
3026 	if (qla_config_rss_ind_table(ha))
3027 		return (-1);
3028 
3029 	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
3030 		return (-1);
3031 
3032 	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
3033 		return (-1);
3034 
3035 	if (ha->ifp->if_capenable & IFCAP_LRO) {
3036 		if (ha->hw.enable_hw_lro) {
3037 			ha->hw.enable_soft_lro = 0;
3038 
3039 			if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
3040 				return (-1);
3041 		} else {
3042 			ha->hw.enable_soft_lro = 1;
3043 
3044 			if (qla_config_soft_lro(ha))
3045 				return (-1);
3046 		}
3047 	}
3048 
3049         if (qla_init_nic_func(ha))
3050                 return (-1);
3051 
3052         if (qla_query_fw_dcbx_caps(ha))
3053                 return (-1);
3054 
3055 	for (i = 0; i < ha->hw.num_sds_rings; i++)
3056 		QL_ENABLE_INTERRUPTS(ha, i);
3057 
3058 	return (0);
3059 }
3060 
3061 static int
3062 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
3063 {
3064         device_t                dev = ha->pci_dev;
3065         q80_rq_map_sds_to_rds_t *map_rings;
3066 	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
3067         uint32_t                i, err;
3068         qla_hw_t                *hw = &ha->hw;
3069 
3070         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
3071         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
3072 
3073         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
3074         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
3075         map_rings->count_version |= Q8_MBX_CMD_VERSION;
3076 
3077         map_rings->cntxt_id = hw->rcv_cntxt_id;
3078         map_rings->num_rings = num_idx;
3079 
3080 	for (i = 0; i < num_idx; i++) {
3081 		map_rings->sds_rds[i].sds_ring = i + start_idx;
3082 		map_rings->sds_rds[i].rds_ring = i + start_idx;
3083 	}
3084 
3085         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
3086                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
3087                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
3088                 device_printf(dev, "%s: failed0\n", __func__);
3089                 return (-1);
3090         }
3091 
3092         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
3093 
3094         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
3095 
3096         if (err) {
3097                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3098                 return (-1);
3099         }
3100 
3101         return (0);
3102 }
3103 
3104 /*
3105  * Name: qla_init_rcv_cntxt
3106  * Function: Creates the Receive Context.
3107  */
3108 static int
3109 qla_init_rcv_cntxt(qla_host_t *ha)
3110 {
3111 	q80_rq_rcv_cntxt_t	*rcntxt;
3112 	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
3113 	q80_stat_desc_t		*sdesc;
3114 	int			i, j;
3115         qla_hw_t		*hw = &ha->hw;
3116 	device_t		dev;
3117 	uint32_t		err;
3118 	uint32_t		rcntxt_sds_rings;
3119 	uint32_t		rcntxt_rds_rings;
3120 	uint32_t		max_idx;
3121 
3122 	dev = ha->pci_dev;
3123 
3124 	/*
3125 	 * Create Receive Context
3126 	 */
3127 
3128 	for (i = 0; i < hw->num_sds_rings; i++) {
3129 		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
3130 
3131 		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
3132 			sdesc[j].data[0] = 1ULL;
3133 			sdesc[j].data[1] = 1ULL;
3134 		}
3135 	}
3136 
3137 	rcntxt_sds_rings = hw->num_sds_rings;
3138 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
3139 		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
3140 
3141 	rcntxt_rds_rings = hw->num_rds_rings;
3142 
3143 	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
3144 		rcntxt_rds_rings = MAX_RDS_RING_SETS;
3145 
3146 	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
3147 	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
3148 
3149 	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
3150 	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
3151 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3152 
3153 	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
3154 			Q8_RCV_CNTXT_CAP0_LRO |
3155 			Q8_RCV_CNTXT_CAP0_HW_LRO |
3156 			Q8_RCV_CNTXT_CAP0_RSS |
3157 			Q8_RCV_CNTXT_CAP0_SGL_LRO;
3158 
3159 	if (ha->hw.enable_9kb)
3160 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
3161 	else
3162 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
3163 
3164 	if (ha->hw.num_rds_rings > 1) {
3165 		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
3166 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
3167 	} else
3168 		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
3169 
3170 	rcntxt->nsds_rings = rcntxt_sds_rings;
3171 
3172 	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
3173 
3174 	rcntxt->rcv_vpid = 0;
3175 
3176 	for (i = 0; i <  rcntxt_sds_rings; i++) {
3177 		rcntxt->sds[i].paddr =
3178 			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
3179 		rcntxt->sds[i].size =
3180 			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3181 		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
3182 		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
3183 	}
3184 
3185 	for (i = 0; i <  rcntxt_rds_rings; i++) {
3186 		rcntxt->rds[i].paddr_std =
3187 			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
3188 
3189 		if (ha->hw.enable_9kb)
3190 			rcntxt->rds[i].std_bsize =
3191 				qla_host_to_le64(MJUM9BYTES);
3192 		else
3193 			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3194 
3195 		rcntxt->rds[i].std_nentries =
3196 			qla_host_to_le32(NUM_RX_DESCRIPTORS);
3197 	}
3198 
3199         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3200 		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
3201                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3202                 device_printf(dev, "%s: failed0\n", __func__);
3203                 return (-1);
3204         }
3205 
3206         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3207 
3208         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3209 
3210         if (err) {
3211                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3212                 return (-1);
3213         }
3214 
3215 	for (i = 0; i <  rcntxt_sds_rings; i++) {
3216 		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3217 	}
3218 
3219 	for (i = 0; i <  rcntxt_rds_rings; i++) {
3220 		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3221 	}
3222 
3223 	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3224 
3225 	ha->hw.flags.init_rx_cnxt = 1;
3226 
3227 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3228 
3229 		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3230 
3231 			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3232 				max_idx = MAX_RCNTXT_SDS_RINGS;
3233 			else
3234 				max_idx = hw->num_sds_rings - i;
3235 
3236 			err = qla_add_rcv_rings(ha, i, max_idx);
3237 			if (err)
3238 				return -1;
3239 
3240 			i += max_idx;
3241 		}
3242 	}
3243 
3244 	if (hw->num_rds_rings > 1) {
3245 
3246 		for (i = 0; i < hw->num_rds_rings; ) {
3247 
3248 			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3249 				max_idx = MAX_SDS_TO_RDS_MAP;
3250 			else
3251 				max_idx = hw->num_rds_rings - i;
3252 
3253 			err = qla_map_sds_to_rds(ha, i, max_idx);
3254 			if (err)
3255 				return -1;
3256 
3257 			i += max_idx;
3258 		}
3259 	}
3260 
3261 	return (0);
3262 }
3263 
3264 static int
3265 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3266 {
3267 	device_t		dev = ha->pci_dev;
3268 	q80_rq_add_rcv_rings_t	*add_rcv;
3269 	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
3270 	uint32_t		i,j, err;
3271         qla_hw_t		*hw = &ha->hw;
3272 
3273 	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3274 	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3275 
3276 	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3277 	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3278 	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3279 
3280 	add_rcv->nrds_sets_rings = nsds | (1 << 5);
3281 	add_rcv->nsds_rings = nsds;
3282 	add_rcv->cntxt_id = hw->rcv_cntxt_id;
3283 
3284         for (i = 0; i <  nsds; i++) {
3285 
3286 		j = i + sds_idx;
3287 
3288                 add_rcv->sds[i].paddr =
3289                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3290 
3291                 add_rcv->sds[i].size =
3292                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3293 
3294                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3295                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3296 
3297         }
3298 
3299         for (i = 0; (i <  nsds); i++) {
3300                 j = i + sds_idx;
3301 
3302                 add_rcv->rds[i].paddr_std =
3303                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3304 
3305 		if (ha->hw.enable_9kb)
3306 			add_rcv->rds[i].std_bsize =
3307 				qla_host_to_le64(MJUM9BYTES);
3308 		else
3309                 	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3310 
3311                 add_rcv->rds[i].std_nentries =
3312                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3313         }
3314 
3315 
3316         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3317 		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
3318                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3319                 device_printf(dev, "%s: failed0\n", __func__);
3320                 return (-1);
3321         }
3322 
3323         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3324 
3325         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3326 
3327         if (err) {
3328                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3329                 return (-1);
3330         }
3331 
3332 	for (i = 0; i < nsds; i++) {
3333 		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3334 	}
3335 
3336 	for (i = 0; i < nsds; i++) {
3337 		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3338 	}
3339 
3340 	return (0);
3341 }
3342 
3343 /*
3344  * Name: qla_del_rcv_cntxt
3345  * Function: Destroys the Receive Context.
3346  */
3347 static void
3348 qla_del_rcv_cntxt(qla_host_t *ha)
3349 {
3350 	device_t			dev = ha->pci_dev;
3351 	q80_rcv_cntxt_destroy_t		*rcntxt;
3352 	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
3353 	uint32_t			err;
3354 	uint8_t				bcast_mac[6];
3355 
3356 	if (!ha->hw.flags.init_rx_cnxt)
3357 		return;
3358 
3359 	if (qla_hw_del_all_mcast(ha))
3360 		return;
3361 
3362 	if (ha->hw.flags.bcast_mac) {
3363 
3364 		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3365 		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3366 
3367 		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3368 			return;
3369 		ha->hw.flags.bcast_mac = 0;
3370 
3371 	}
3372 
3373 	if (ha->hw.flags.unicast_mac) {
3374 		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3375 			return;
3376 		ha->hw.flags.unicast_mac = 0;
3377 	}
3378 
3379 	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3380 	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3381 
3382 	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3383 	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3384 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3385 
3386 	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3387 
3388         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3389 		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3390                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3391                 device_printf(dev, "%s: failed0\n", __func__);
3392                 return;
3393         }
3394         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3395 
3396         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3397 
3398         if (err) {
3399                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3400         }
3401 
3402 	ha->hw.flags.init_rx_cnxt = 0;
3403 	return;
3404 }
3405 
3406 /*
3407  * Name: qla_init_xmt_cntxt
3408  * Function: Creates the Transmit Context.
3409  */
3410 static int
3411 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3412 {
3413 	device_t		dev;
3414         qla_hw_t		*hw = &ha->hw;
3415 	q80_rq_tx_cntxt_t	*tcntxt;
3416 	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
3417 	uint32_t		err;
3418 	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3419 	uint32_t		intr_idx;
3420 
3421 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3422 
3423 	dev = ha->pci_dev;
3424 
3425 	/*
3426 	 * Create Transmit Context
3427 	 */
3428 	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3429 	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3430 
3431 	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3432 	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3433 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3434 
3435 	intr_idx = txr_idx;
3436 
3437 #ifdef QL_ENABLE_ISCSI_TLV
3438 
3439 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3440 				Q8_TX_CNTXT_CAP0_TC;
3441 
3442 	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3443 		tcntxt->traffic_class = 1;
3444 	}
3445 
3446 	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3447 
3448 #else
3449 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3450 
3451 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3452 
3453 	tcntxt->ntx_rings = 1;
3454 
3455 	tcntxt->tx_ring[0].paddr =
3456 		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3457 	tcntxt->tx_ring[0].tx_consumer =
3458 		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3459 	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3460 
3461 	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3462 	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3463 
3464 	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3465 	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3466 	*(hw_tx_cntxt->tx_cons) = 0;
3467 
3468         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3469 		(sizeof (q80_rq_tx_cntxt_t) >> 2),
3470                 ha->hw.mbox,
3471 		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3472                 device_printf(dev, "%s: failed0\n", __func__);
3473                 return (-1);
3474         }
3475         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3476 
3477         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3478 
3479         if (err) {
3480                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3481 		return -1;
3482         }
3483 
3484 	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3485 	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3486 
3487 	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3488 		return (-1);
3489 
3490 	return (0);
3491 }
3492 
3493 
3494 /*
3495  * Name: qla_del_xmt_cntxt
3496  * Function: Destroys the Transmit Context.
3497  */
3498 static int
3499 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3500 {
3501 	device_t			dev = ha->pci_dev;
3502 	q80_tx_cntxt_destroy_t		*tcntxt;
3503 	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
3504 	uint32_t			err;
3505 
3506 	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3507 	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3508 
3509 	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3510 	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3511 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3512 
3513 	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3514 
3515         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3516 		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
3517                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3518                 device_printf(dev, "%s: failed0\n", __func__);
3519                 return (-1);
3520         }
3521         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3522 
3523         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3524 
3525         if (err) {
3526                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3527 		return (-1);
3528         }
3529 
3530 	return (0);
3531 }
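
/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Contexts of all transmit rings.
 */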
3532 static int
3533 qla_del_xmt_cntxt(qla_host_t *ha)
3534 {
3535 	uint32_t i;
3536 	int ret = 0;
3537 
3538 	if (!ha->hw.flags.init_tx_cnxt)
3539 		return (ret);
3540 
3541 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3542 		if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0)
3543 			break;
3544 	}
3545 	ha->hw.flags.init_tx_cnxt = 0;
3546 
3547 	return (ret);
3548 }
3549 
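/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Contexts for all transmit rings; on
 *	failure any contexts already created are destroyed.
 */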
3550 static int
3551 qla_init_xmt_cntxt(qla_host_t *ha)
3552 {
3553 	uint32_t i, j;
3554 
3555 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3556 		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3557 			for (j = 0; j < i; j++) {
3558 				if (qla_del_xmt_cntxt_i(ha, j))
3559 					break;
3560 			}
3561 			return (-1);
3562 		}
3563 	}
3564 	ha->hw.flags.init_tx_cnxt = 1;
3565 	return (0);
3566 }
3567 
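/*
 * Name: qla_hw_all_mcast
 * Function: Adds (add_mcast = 1) or deletes (add_mcast = 0) all cached
 *	multicast addresses in the hardware, Q8_MAX_MAC_ADDRS at a time
 *	per mailbox command.
 */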
3568 static int
3569 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3570 {
3571 	int i, nmcast;
3572 	uint32_t count = 0;
3573 	uint8_t *mcast;
3574 
3575 	nmcast = ha->hw.nmcast;
3576 
3577 	QL_DPRINT2(ha, (ha->pci_dev,
3578 		"%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3579 
3580 	mcast = ha->hw.mac_addr_arr;
3581 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3582 
3583 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3584 		if ((ha->hw.mcast[i].addr[0] != 0) ||
3585 			(ha->hw.mcast[i].addr[1] != 0) ||
3586 			(ha->hw.mcast[i].addr[2] != 0) ||
3587 			(ha->hw.mcast[i].addr[3] != 0) ||
3588 			(ha->hw.mcast[i].addr[4] != 0) ||
3589 			(ha->hw.mcast[i].addr[5] != 0)) {
3590 
3591 			bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3592 			mcast = mcast + ETHER_ADDR_LEN;
3593 			count++;
3594 
3595 			device_printf(ha->pci_dev,
3596 				"%s: %x:%x:%x:%x:%x:%x \n",
3597 				__func__, ha->hw.mcast[i].addr[0],
3598 				ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2],
3599 				ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4],
3600 				ha->hw.mcast[i].addr[5]);
3601 
3602 			if (count == Q8_MAX_MAC_ADDRS) {
3603 				if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3604 					add_mcast, count)) {
3605                 			device_printf(ha->pci_dev,
3606 						"%s: failed\n", __func__);
3607 					return (-1);
3608 				}
3609 
3610 				count = 0;
3611 				mcast = ha->hw.mac_addr_arr;
3612 				memset(mcast, 0,
3613 					(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3614 			}
3615 
3616 			nmcast--;
3617 		}
3618 	}
3619 
3620 	if (count) {
3621 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3622 			count)) {
3623                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3624 			return (-1);
3625 		}
3626 	}
3627 	QL_DPRINT2(ha, (ha->pci_dev,
3628 		"%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3629 
3630 	return 0;
3631 }
3632 
3633 static int
3634 qla_hw_add_all_mcast(qla_host_t *ha)
3635 {
3636 	int ret;
3637 
3638 	ret = qla_hw_all_mcast(ha, 1);
3639 
3640 	return (ret);
3641 }
3642 
3643 int
3644 qla_hw_del_all_mcast(qla_host_t *ha)
3645 {
3646 	int ret;
3647 
3648 	ret = qla_hw_all_mcast(ha, 0);
3649 
3650 	bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3651 	ha->hw.nmcast = 0;
3652 
3653 	return (ret);
3654 }
3655 
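/*
 * Name: qla_hw_mac_addr_present
 * Function: Returns 0 if the given MAC address is already present in the
 *	cached multicast table; -1 otherwise.
 */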
3656 static int
3657 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3658 {
3659 	int i;
3660 
3661 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3662 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3663 			return (0); /* it has already been added */
3664 	}
3665 	return (-1);
3666 }
3667 
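/*
 * Name: qla_hw_add_mcast
 * Function: Copies nmcast MAC addresses from mta into the free slots of
 *	the cached multicast table.
 */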
3668 static int
3669 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3670 {
3671 	int i;
3672 
3673 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3674 
3675 		if ((ha->hw.mcast[i].addr[0] == 0) &&
3676 			(ha->hw.mcast[i].addr[1] == 0) &&
3677 			(ha->hw.mcast[i].addr[2] == 0) &&
3678 			(ha->hw.mcast[i].addr[3] == 0) &&
3679 			(ha->hw.mcast[i].addr[4] == 0) &&
3680 			(ha->hw.mcast[i].addr[5] == 0)) {
3681 
3682 			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3683 			ha->hw.nmcast++;
3684 
3685 			mta = mta + ETHER_ADDR_LEN;
3686 			nmcast--;
3687 
3688 			if (nmcast == 0)
3689 				break;
3690 		}
3691 
3692 	}
3693 	return 0;
3694 }
3695 
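/*
 * Name: qla_hw_del_mcast
 * Function: Removes the nmcast MAC addresses in mta from the cached
 *	multicast table.
 */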
3696 static int
3697 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3698 {
3699 	int i;
3700 
3701 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3702 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3703 
3704 			ha->hw.mcast[i].addr[0] = 0;
3705 			ha->hw.mcast[i].addr[1] = 0;
3706 			ha->hw.mcast[i].addr[2] = 0;
3707 			ha->hw.mcast[i].addr[3] = 0;
3708 			ha->hw.mcast[i].addr[4] = 0;
3709 			ha->hw.mcast[i].addr[5] = 0;
3710 
3711 			ha->hw.nmcast--;
3712 
3713 			mta = mta + ETHER_ADDR_LEN;
3714 			nmcast--;
3715 
3716 			if (nmcast == 0)
3717 				break;
3718 		}
3719 	}
3720 	return 0;
3721 }
3722 
3723 /*
3724  * Name: ql_hw_set_multi
3725  * Function: Sets the Multicast Addresses provided by the host O.S. into the
3726  *	hardware (for the given interface)
3727  */
3728 int
3729 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3730 	uint32_t add_mac)
3731 {
3732 	uint8_t *mta = mcast_addr;
3733 	int i;
3734 	int ret = 0;
3735 	uint32_t count = 0;
3736 	uint8_t *mcast;
3737 
3738 	mcast = ha->hw.mac_addr_arr;
3739 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3740 
3741 	for (i = 0; i < mcnt; i++) {
3742 		if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3743 			if (add_mac) {
3744 				if (qla_hw_mac_addr_present(ha, mta) != 0) {
3745 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3746 					mcast = mcast + ETHER_ADDR_LEN;
3747 					count++;
3748 				}
3749 			} else {
3750 				if (qla_hw_mac_addr_present(ha, mta) == 0) {
3751 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3752 					mcast = mcast + ETHER_ADDR_LEN;
3753 					count++;
3754 				}
3755 			}
3756 		}
3757 		if (count == Q8_MAX_MAC_ADDRS) {
3758 			if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3759 				add_mac, count)) {
3760                 		device_printf(ha->pci_dev, "%s: failed\n",
3761 					__func__);
3762 				return (-1);
3763 			}
3764 
3765 			if (add_mac) {
3766 				qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3767 					count);
3768 			} else {
3769 				qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3770 					count);
3771 			}
3772 
3773 			count = 0;
3774 			mcast = ha->hw.mac_addr_arr;
3775 			memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3776 		}
3777 
3778 		mta += Q8_MAC_ADDR_LEN;
3779 	}
3780 
3781 	if (count) {
3782 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3783 			count)) {
3784                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3785 			return (-1);
3786 		}
3787 		if (add_mac) {
3788 			qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3789 		} else {
3790 			qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3791 		}
3792 	}
3793 
3794 	return (ret);
3795 }
3796 
3797 /*
3798  * Name: ql_hw_tx_done_locked
3799  * Function: Handle Transmit Completions
3800  */
3801 void
3802 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3803 {
3804 	qla_tx_buf_t *txb;
3805 	qla_hw_t *hw = &ha->hw;
3806 	uint32_t comp_idx, comp_count = 0;
3807 	qla_hw_tx_cntxt_t *hw_tx_cntxt;
3808 
3809 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3810 
3811 	/* retrieve index of last entry in tx ring completed */
3812 	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3813 
3814 	while (comp_idx != hw_tx_cntxt->txr_comp) {
3815 
3816 		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3817 
3818 		hw_tx_cntxt->txr_comp++;
3819 		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3820 			hw_tx_cntxt->txr_comp = 0;
3821 
3822 		comp_count++;
3823 
3824 		if (txb->m_head) {
3825 			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
3826 
3827 			bus_dmamap_sync(ha->tx_tag, txb->map,
3828 				BUS_DMASYNC_POSTWRITE);
3829 			bus_dmamap_unload(ha->tx_tag, txb->map);
3830 			m_freem(txb->m_head);
3831 
3832 			txb->m_head = NULL;
3833 		}
3834 	}
3835 
3836 	hw_tx_cntxt->txr_free += comp_count;
3837 
3838 	if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS)
3839 		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d "
3840 			"txr_next = %d txr_comp = %d\n", __func__, __LINE__,
3841 			txr_idx, hw_tx_cntxt->txr_free,
3842 			hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp);
3843 
3844 	QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \
3845 		("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\
3846 		__func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \
3847 		hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp));
3848 
3849 	return;
3850 }
3851 
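/*
 * Name: ql_update_link_state
 * Function: Reads the link state register, extracts the link status bit of
 *	this PCI function and notifies the stack if the state changed.
 */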
3852 void
3853 ql_update_link_state(qla_host_t *ha)
3854 {
3855 	uint32_t link_state = 0;
3856 	uint32_t prev_link_state;
3857 
3858 	prev_link_state =  ha->hw.link_up;
3859 
3860 	if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) {
3861 		link_state = READ_REG32(ha, Q8_LINK_STATE);
3862 
3863 		if (ha->pci_func == 0) {
3864 			link_state = (((link_state & 0xF) == 1) ? 1 : 0);
3865 		} else {
3866 			link_state = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3867 		}
3868 	}
3869 
3870 	atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state);
3871 
3872 	if (prev_link_state !=  ha->hw.link_up) {
3873 		if (ha->hw.link_up) {
3874 			if_link_state_change(ha->ifp, LINK_STATE_UP);
3875 		} else {
3876 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3877 		}
3878 	}
3879 	return;
3880 }
3881 
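/*
 * Name: ql_hw_check_health
 * Function: Once every 500 invocations, checks the ASIC temperature and the
 *	firmware heartbeat; initiates recovery and returns -1 on failure.
 */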
3882 int
3883 ql_hw_check_health(qla_host_t *ha)
3884 {
3885 	uint32_t val;
3886 
3887 	ha->hw.health_count++;
3888 
3889 	if (ha->hw.health_count < 500)
3890 		return 0;
3891 
3892 	ha->hw.health_count = 0;
3893 
3894 	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3895 
3896 	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3897 		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3898 		device_printf(ha->pci_dev, "%s: Temperature Alert"
3899 			" at ts_usecs %ld ts_reg = 0x%08x\n",
3900 			__func__, qla_get_usec_timestamp(), val);
3901 
3902 		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE)
3903 			ha->hw.sp_log_stop = -1;
3904 
3905 		QL_INITIATE_RECOVERY(ha);
3906 		return -1;
3907 	}
3908 
3909 	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3910 
3911 	if ((val != ha->hw.hbeat_value) &&
3912 		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3913 		ha->hw.hbeat_value = val;
3914 		ha->hw.hbeat_failure = 0;
3915 		return 0;
3916 	}
3917 
3918 	ha->hw.hbeat_failure++;
3919 
3920 
3922 		device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3923 			__func__, val);
3924 	if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3925 		return 0;
3926 	else {
3927 		uint32_t peg_halt_status1;
3928 		uint32_t peg_halt_status2;
3929 
3930 		peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1);
3931 		peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2);
3932 
3933 		device_printf(ha->pci_dev,
3934 			"%s: Heartbeat Failue at ts_usecs = %ld "
3935 			"fw_heart_beat = 0x%08x "
3936 			"peg_halt_status1 = 0x%08x "
3937 			"peg_halt_status2 = 0x%08x\n",
3938 			__func__, qla_get_usec_timestamp(), val,
3939 			peg_halt_status1, peg_halt_status2);
3940 
3941 		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE)
3942 			ha->hw.sp_log_stop = -1;
3943 	}
3944 	QL_INITIATE_RECOVERY(ha);
3945 
3946 	return -1;
3947 }
3948 
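/*
 * Name: qla_init_nic_func
 * Function: Issues the Init NIC Function mailbox command and registers for
 *	DCBX change, SFP change and IDC AENs.
 */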
3949 static int
3950 qla_init_nic_func(qla_host_t *ha)
3951 {
3952         device_t                dev;
3953         q80_init_nic_func_t     *init_nic;
3954         q80_init_nic_func_rsp_t *init_nic_rsp;
3955         uint32_t                err;
3956 
3957         dev = ha->pci_dev;
3958 
3959         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3960         bzero(init_nic, sizeof(q80_init_nic_func_t));
3961 
3962         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3963         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3964         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3965 
3966         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3967         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3968         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3969 
3970 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3971         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3972                 (sizeof (q80_init_nic_func_t) >> 2),
3973                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3974                 device_printf(dev, "%s: failed\n", __func__);
3975                 return -1;
3976         }
3977 
3978         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3979 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3980 
3981         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3982 
3983         if (err) {
3984                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3985         } else {
3986                 device_printf(dev, "%s: successful\n", __func__);
3987 	}
3988 
3989         return 0;
3990 }
3991 
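/*
 * Name: qla_stop_nic_func
 * Function: Issues the Stop NIC Function mailbox command and deregisters
 *	the DCBX change and SFP change AENs.
 */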
3992 static int
3993 qla_stop_nic_func(qla_host_t *ha)
3994 {
3995         device_t                dev;
3996         q80_stop_nic_func_t     *stop_nic;
3997         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3998         uint32_t                err;
3999 
4000         dev = ha->pci_dev;
4001 
4002         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
4003         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
4004 
4005         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
4006         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
4007         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
4008 
4009         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
4010         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
4011 
4012 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
4013         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
4014                 (sizeof (q80_stop_nic_func_t) >> 2),
4015                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
4016                 device_printf(dev, "%s: failed\n", __func__);
4017                 return -1;
4018         }
4019 
4020         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
4021 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
4022 
4023         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
4024 
4025         if (err) {
4026                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4027         }
4028 
4029         return 0;
4030 }
4031 
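/*
 * Name: qla_query_fw_dcbx_caps
 * Function: Queries the firmware DCBX capabilities via the mailbox
 *	interface.
 */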
4032 static int
4033 qla_query_fw_dcbx_caps(qla_host_t *ha)
4034 {
4035         device_t                        dev;
4036         q80_query_fw_dcbx_caps_t        *fw_dcbx;
4037         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
4038         uint32_t                        err;
4039 
4040         dev = ha->pci_dev;
4041 
4042         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
4043         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
4044 
4045         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
4046         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
4047         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
4048 
4049         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
4050         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
4051                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
4052                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
4053                 device_printf(dev, "%s: failed\n", __func__);
4054                 return -1;
4055         }
4056 
4057         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
4058         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
4059                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
4060 
4061         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
4062 
4063         if (err) {
4064                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4065         }
4066 
4067         return 0;
4068 }
4069 
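/*
 * Name: qla_idc_ack
 * Function: Acknowledges an IDC AEN via the mailbox interface and polls up
 *	to 300 times, 100ms apart, for the intermediate completion.
 */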
4070 static int
4071 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
4072         uint32_t aen_mb3, uint32_t aen_mb4)
4073 {
4074         device_t                dev;
4075         q80_idc_ack_t           *idc_ack;
4076         q80_idc_ack_rsp_t       *idc_ack_rsp;
4077         uint32_t                err;
4078         int                     count = 300;
4079 
4080         dev = ha->pci_dev;
4081 
4082         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
4083         bzero(idc_ack, sizeof(q80_idc_ack_t));
4084 
4085         idc_ack->opcode = Q8_MBX_IDC_ACK;
4086         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
4087         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
4088 
4089         idc_ack->aen_mb1 = aen_mb1;
4090         idc_ack->aen_mb2 = aen_mb2;
4091         idc_ack->aen_mb3 = aen_mb3;
4092         idc_ack->aen_mb4 = aen_mb4;
4093 
4094         ha->hw.imd_compl = 0;
4095 
4096         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
4097                 (sizeof (q80_idc_ack_t) >> 2),
4098                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
4099                 device_printf(dev, "%s: failed\n", __func__);
4100                 return -1;
4101         }
4102 
4103         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
4104 
4105         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
4106 
4107         if (err) {
4108                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4109                 return(-1);
4110         }
4111 
4112         while (count && !ha->hw.imd_compl) {
4113                 qla_mdelay(__func__, 100);
4114                 count--;
4115         }
4116 
4117         if (!count)
4118                 return -1;
4119         else
4120                 device_printf(dev, "%s: count %d\n", __func__, count);
4121 
4122         return (0);
4123 }
4124 
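/*
 * Name: qla_set_port_config
 * Function: Sets the port configuration (pause, standard pause direction,
 *	DCBX) via the mailbox interface, polling for the intermediate IDC
 *	completion when one is indicated.
 */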
4125 static int
4126 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
4127 {
4128         device_t                dev;
4129         q80_set_port_cfg_t      *pcfg;
4130         q80_set_port_cfg_rsp_t  *pfg_rsp;
4131         uint32_t                err;
4132         int                     count = 300;
4133 
4134         dev = ha->pci_dev;
4135 
4136         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
4137         bzero(pcfg, sizeof(q80_set_port_cfg_t));
4138 
4139         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
4140         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
4141         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4142 
4143         pcfg->cfg_bits = cfg_bits;
4144 
4145         device_printf(dev, "%s: cfg_bits"
4146                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4147                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4148                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4149                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4150                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
4151 
4152         ha->hw.imd_compl = 0;
4153 
4154         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4155                 (sizeof (q80_set_port_cfg_t) >> 2),
4156                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
4157                 device_printf(dev, "%s: failed\n", __func__);
4158                 return -1;
4159         }
4160 
4161         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
4162 
4163         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
4164 
4165         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
4166                 while (count && !ha->hw.imd_compl) {
4167                         qla_mdelay(__func__, 100);
4168                         count--;
4169                 }
4170                 if (count) {
4171                         device_printf(dev, "%s: count %d\n", __func__, count);
4172 
4173                         err = 0;
4174                 }
4175         }
4176 
4177         if (err) {
4178                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4179                 return(-1);
4180         }
4181 
4182         return (0);
4183 }
4184 
4185 
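/*
 * Name: qla_get_minidump_tmplt_size
 * Function: Returns the minidump template size; taken from the built-in
 *	template header when QL_LDFLASH_FW is not defined, otherwise
 *	queried via the mailbox interface.
 */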
4186 static int
4187 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
4188 {
4189 	uint32_t			err;
4190 	device_t			dev = ha->pci_dev;
4191 	q80_config_md_templ_size_t	*md_size;
4192 	q80_config_md_templ_size_rsp_t	*md_size_rsp;
4193 
4194 #ifndef QL_LDFLASH_FW
4195 
4196 	ql_minidump_template_hdr_t *hdr;
4197 
4198 	hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
4199 	*size = hdr->size_of_template;
4200 	return (0);
4201 
4202 #endif /* #ifndef QL_LDFLASH_FW */
4203 
4204 	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
4205 	bzero(md_size, sizeof(q80_config_md_templ_size_t));
4206 
4207 	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
4208 	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
4209 	md_size->count_version |= Q8_MBX_CMD_VERSION;
4210 
4211 	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
4212 		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
4213 		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
4214 
4215 		device_printf(dev, "%s: failed\n", __func__);
4216 
4217 		return (-1);
4218 	}
4219 
4220 	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
4221 
4222 	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
4223 
4224         if (err) {
4225 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4226 		return(-1);
4227         }
4228 
4229 	*size = md_size_rsp->templ_size;
4230 
4231 	return (0);
4232 }
4233 
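/*
 * Name: qla_get_port_config
 * Function: Retrieves and logs the current port configuration bits via the
 *	mailbox interface.
 */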
4234 static int
4235 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
4236 {
4237         device_t                dev;
4238         q80_get_port_cfg_t      *pcfg;
4239         q80_get_port_cfg_rsp_t  *pcfg_rsp;
4240         uint32_t                err;
4241 
4242         dev = ha->pci_dev;
4243 
4244         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
4245         bzero(pcfg, sizeof(q80_get_port_cfg_t));
4246 
4247         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
4248         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
4249         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4250 
4251         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4252                 (sizeof (q80_get_port_cfg_t) >> 2),
4253                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4254                 device_printf(dev, "%s: failed\n", __func__);
4255                 return -1;
4256         }
4257 
4258         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4259 
4260         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4261 
4262         if (err) {
4263                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4264                 return(-1);
4265         }
4266 
4267         device_printf(dev, "%s: [cfg_bits, port type]"
4268                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4269                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4270                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4271                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4272                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4273                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4274                 );
4275 
4276         *cfg_bits = pcfg_rsp->cfg_bits;
4277 
4278         return (0);
4279 }
4280 
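/*
 * Name: ql_iscsi_pdu
 * Function: Returns 0 if the mbuf chain is a TCP segment with the
 *	well-known iSCSI port (3260) as source or destination port;
 *	-1 otherwise.
 */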
4281 int
4282 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4283 {
4284         struct ether_vlan_header        *eh;
4285         uint16_t                        etype;
4286         struct ip                       *ip = NULL;
4287         struct ip6_hdr                  *ip6 = NULL;
4288         struct tcphdr                   *th = NULL;
4289         uint32_t                        hdrlen;
4290         uint32_t                        offset;
4291         uint8_t                         buf[sizeof(struct ip6_hdr)];
4292 
4293         eh = mtod(mp, struct ether_vlan_header *);
4294 
4295         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4296                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4297                 etype = ntohs(eh->evl_proto);
4298         } else {
4299                 hdrlen = ETHER_HDR_LEN;
4300                 etype = ntohs(eh->evl_encap_proto);
4301         }
4302 
4303 	if (etype == ETHERTYPE_IP) {
4304 
4305 		offset = (hdrlen + sizeof (struct ip));
4306 
4307 		if (mp->m_len >= offset) {
4308                         ip = (struct ip *)(mp->m_data + hdrlen);
4309 		} else {
4310 			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4311                         ip = (struct ip *)buf;
4312 		}
4313 
4314                 if (ip->ip_p == IPPROTO_TCP) {
4315 
4316 			hdrlen += ip->ip_hl << 2;
4317 			offset = hdrlen + 4;
4318 
4319 			if (mp->m_len >= offset) {
4320 				th = (struct tcphdr *)(mp->m_data + hdrlen);
4321 			} else {
4322                                 m_copydata(mp, hdrlen, 4, buf);
4323 				th = (struct tcphdr *)buf;
4324 			}
4325                 }
4326 
4327 	} else if (etype == ETHERTYPE_IPV6) {
4328 
4329 		offset = (hdrlen + sizeof (struct ip6_hdr));
4330 
4331 		if (mp->m_len >= offset) {
4332                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4333 		} else {
4334                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4335                         ip6 = (struct ip6_hdr *)buf;
4336 		}
4337 
4338                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4339 
4340 			hdrlen += sizeof(struct ip6_hdr);
4341 			offset = hdrlen + 4;
4342 
4343 			if (mp->m_len >= offset) {
4344 				th = (struct tcphdr *)(mp->m_data + hdrlen);
4345 			} else {
4346 				m_copydata(mp, hdrlen, 4, buf);
4347 				th = (struct tcphdr *)buf;
4348 			}
4349                 }
4350 	}
4351 
4352         if (th != NULL) {
4353                 if ((th->th_sport == htons(3260)) ||
4354                         (th->th_dport == htons(3260)))
4355                         return 0;
4356         }
4357         return (-1);
4358 }
4359 
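/*
 * Name: qla_hw_async_event
 * Function: Dispatches asynchronous events; event 0x8101 is acknowledged
 *	via qla_idc_ack().
 */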
4360 void
4361 qla_hw_async_event(qla_host_t *ha)
4362 {
4363         switch (ha->hw.aen_mb0) {
4364         case 0x8101:
4365                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4366                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4367 
4368                 break;
4369 
4370         default:
4371                 break;
4372         }
4373 
4374         return;
4375 }
4376 
4377 #ifdef QL_LDFLASH_FW
4378 static int
4379 ql_get_minidump_template(qla_host_t *ha)
4380 {
4381 	uint32_t			err;
4382 	device_t			dev = ha->pci_dev;
4383 	q80_config_md_templ_cmd_t	*md_templ;
4384 	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
4385 
4386 	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4387 	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4388 
4389 	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4390 	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4391 	md_templ->count_version |= Q8_MBX_CMD_VERSION;
4392 
4393 	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4394 	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4395 
4396 	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4397 		(sizeof(q80_config_md_templ_cmd_t) >> 2),
4398 		 ha->hw.mbox,
4399 		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4400 
4401 		device_printf(dev, "%s: failed\n", __func__);
4402 
4403 		return (-1);
4404 	}
4405 
4406 	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4407 
4408 	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4409 
4410 	if (err) {
4411 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4412 		return (-1);
4413 	}
4414 
4415 	return (0);
4416 
4417 }
4418 #endif /* #ifdef QL_LDFLASH_FW */
4419 
4420 /*
4421  * Minidump related functionality
4422  */
4423 
4424 static int ql_parse_template(qla_host_t *ha);
4425 
4426 static uint32_t ql_rdcrb(qla_host_t *ha,
4427 			ql_minidump_entry_rdcrb_t *crb_entry,
4428 			uint32_t * data_buff);
4429 
4430 static uint32_t ql_pollrd(qla_host_t *ha,
4431 			ql_minidump_entry_pollrd_t *entry,
4432 			uint32_t * data_buff);
4433 
4434 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4435 			ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4436 			uint32_t *data_buff);
4437 
4438 static uint32_t ql_L2Cache(qla_host_t *ha,
4439 			ql_minidump_entry_cache_t *cacheEntry,
4440 			uint32_t * data_buff);
4441 
4442 static uint32_t ql_L1Cache(qla_host_t *ha,
4443 			ql_minidump_entry_cache_t *cacheEntry,
4444 			uint32_t *data_buff);
4445 
4446 static uint32_t ql_rdocm(qla_host_t *ha,
4447 			ql_minidump_entry_rdocm_t *ocmEntry,
4448 			uint32_t *data_buff);
4449 
4450 static uint32_t ql_rdmem(qla_host_t *ha,
4451 			ql_minidump_entry_rdmem_t *mem_entry,
4452 			uint32_t *data_buff);
4453 
4454 static uint32_t ql_rdrom(qla_host_t *ha,
4455 			ql_minidump_entry_rdrom_t *romEntry,
4456 			uint32_t *data_buff);
4457 
4458 static uint32_t ql_rdmux(qla_host_t *ha,
4459 			ql_minidump_entry_mux_t *muxEntry,
4460 			uint32_t *data_buff);
4461 
4462 static uint32_t ql_rdmux2(qla_host_t *ha,
4463 			ql_minidump_entry_mux2_t *muxEntry,
4464 			uint32_t *data_buff);
4465 
4466 static uint32_t ql_rdqueue(qla_host_t *ha,
4467 			ql_minidump_entry_queue_t *queueEntry,
4468 			uint32_t *data_buff);
4469 
4470 static uint32_t ql_cntrl(qla_host_t *ha,
4471 			ql_minidump_template_hdr_t *template_hdr,
4472 			ql_minidump_entry_cntrl_t *crbEntry);
4473 
4474 
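/*
 * Name: ql_minidump_size
 * Function: Computes the capture buffer size by summing the template's
 *	capture_size_array entries selected by mdump_capture_mask.
 */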
4475 static uint32_t
4476 ql_minidump_size(qla_host_t *ha)
4477 {
4478 	uint32_t i, k;
4479 	uint32_t size = 0;
4480 	ql_minidump_template_hdr_t *hdr;
4481 
4482 	hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4483 
4484 	i = 0x2;
4485 
4486 	for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4487 		if (i & ha->hw.mdump_capture_mask)
4488 			size += hdr->capture_size_array[k];
4489 		i = i << 1;
4490 	}
4491 	return (size);
4492 }
4493 
4494 static void
4495 ql_free_minidump_buffer(qla_host_t *ha)
4496 {
4497 	if (ha->hw.mdump_buffer != NULL) {
4498 		free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4499 		ha->hw.mdump_buffer = NULL;
4500 		ha->hw.mdump_buffer_size = 0;
4501 	}
4502 	return;
4503 }
4504 
4505 static int
4506 ql_alloc_minidump_buffer(qla_host_t *ha)
4507 {
4508 	ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4509 
4510 	if (!ha->hw.mdump_buffer_size)
4511 		return (-1);
4512 
4513 	ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4514 					M_NOWAIT);
4515 
4516 	if (ha->hw.mdump_buffer == NULL)
4517 		return (-1);
4518 
4519 	return (0);
4520 }
4521 
4522 static void
4523 ql_free_minidump_template_buffer(qla_host_t *ha)
4524 {
4525 	if (ha->hw.mdump_template != NULL) {
4526 		free(ha->hw.mdump_template, M_QLA83XXBUF);
4527 		ha->hw.mdump_template = NULL;
4528 		ha->hw.mdump_template_size = 0;
4529 	}
4530 	return;
4531 }
4532 
4533 static int
4534 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4535 {
4536 	ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4537 
4538 	ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4539 					M_QLA83XXBUF, M_NOWAIT);
4540 
4541 	if (ha->hw.mdump_template == NULL)
4542 		return (-1);
4543 
4544 	return (0);
4545 }
4546 
4547 static int
4548 ql_alloc_minidump_buffers(qla_host_t *ha)
4549 {
4550 	int ret;
4551 
4552 	ret = ql_alloc_minidump_template_buffer(ha);
4553 
4554 	if (ret)
4555 		return (ret);
4556 
4557 	ret = ql_alloc_minidump_buffer(ha);
4558 
4559 	if (ret)
4560 		ql_free_minidump_template_buffer(ha);
4561 
4562 	return (ret);
4563 }
4564 
4565 
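/*
 * Name: ql_validate_minidump_checksum
 * Function: Sums the template as 32-bit words into a 64-bit accumulator,
 *	folds the carries back in and returns the one's complement;
 *	a valid template returns 0.
 */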
4566 static uint32_t
4567 ql_validate_minidump_checksum(qla_host_t *ha)
4568 {
4569         uint64_t sum = 0;
4570 	int count;
4571 	uint32_t *template_buff;
4572 
4573 	count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4574 	template_buff = ha->hw.dma_buf.minidump.dma_b;
4575 
4576 	while (count-- > 0) {
4577 		sum += *template_buff++;
4578 	}
4579 
4580 	while (sum >> 32) {
4581 		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4582 	}
4583 
4584 	return (~sum);
4585 }
4586 
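/*
 * Name: ql_minidump_init
 * Function: Retrieves the minidump template, validates its checksum and
 *	allocates the template and capture buffers.
 */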
4587 int
4588 ql_minidump_init(qla_host_t *ha)
4589 {
4590 	int		ret = 0;
4591 	uint32_t	template_size = 0;
4592 	device_t	dev = ha->pci_dev;
4593 
4594 	/*
4595 	 * Get Minidump Template Size
4596 	 */
4597 	ret = qla_get_minidump_tmplt_size(ha, &template_size);
4598 
4599 	if (ret || (template_size == 0)) {
4600 		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4601 			template_size);
4602 		return (-1);
4603 	}
4604 
4605 	/*
4606 	 * Allocate Memory for Minidump Template
4607 	 */
4608 
4609 	ha->hw.dma_buf.minidump.alignment = 8;
4610 	ha->hw.dma_buf.minidump.size = template_size;
4611 
4612 #ifdef QL_LDFLASH_FW
4613 	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4614 
4615 		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4616 
4617 		return (-1);
4618 	}
4619 	ha->hw.dma_buf.flags.minidump = 1;
4620 
4621 	/*
4622 	 * Retrieve Minidump Template
4623 	 */
4624 	ret = ql_get_minidump_template(ha);
4625 #else
4626 	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4627 
4628 #endif /* #ifdef QL_LDFLASH_FW */
4629 
4630 	if (ret == 0) {
4631 
4632 		ret = ql_validate_minidump_checksum(ha);
4633 
4634 		if (ret == 0) {
4635 
4636 			ret = ql_alloc_minidump_buffers(ha);
4637 
4638 			if (ret == 0)
4639 				ha->hw.mdump_init = 1;
4640 			else
4641 				device_printf(dev,
4642 					"%s: ql_alloc_minidump_buffers"
4643 					" failed\n", __func__);
4644 		} else {
4645 			device_printf(dev, "%s: ql_validate_minidump_checksum"
4646 				" failed\n", __func__);
4647 		}
4648 	} else {
4649 		device_printf(dev, "%s: ql_get_minidump_template failed\n",
4650 			 __func__);
4651 	}
4652 
4653 	if (ret)
4654 		ql_minidump_free(ha);
4655 
4656 	return (ret);
4657 }
4658 
4659 static void
4660 ql_minidump_free(qla_host_t *ha)
4661 {
4662 	ha->hw.mdump_init = 0;
4663 	if (ha->hw.dma_buf.flags.minidump) {
4664 		ha->hw.dma_buf.flags.minidump = 0;
4665 		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4666 	}
4667 
4668 	ql_free_minidump_template_buffer(ha);
4669 	ql_free_minidump_buffer(ha);
4670 
4671 	return;
4672 }
4673 
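/*
 * Name: ql_minidump
 * Function: Performs a one-shot minidump capture: stops the firmware
 *	sequence, copies and parses the template into the capture buffer,
 *	then restarts the sequence.
 */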
4674 void
4675 ql_minidump(qla_host_t *ha)
4676 {
4677 	if (!ha->hw.mdump_init)
4678 		return;
4679 
4680 	if (ha->hw.mdump_done)
4681 		return;
4682 	ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
4683 	ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4684 
4685 	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4686 	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4687 
4688 	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4689 		ha->hw.mdump_template_size);
4690 
4691 	ql_parse_template(ha);
4692 
4693 	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4694 
4695 	ha->hw.mdump_done = 1;
4696 
4697 	return;
4698 }
4699 
4700 
4701 /*
4702  * helper routines
4703  */
4704 static void
4705 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4706 {
4707 	if (esize != entry->hdr.entry_capture_size) {
4708 		entry->hdr.entry_capture_size = esize;
4709 		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4710 	}
4711 	return;
4712 }
4713 
4714 
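/*
 * Name: ql_parse_template
 * Function: Walks the minidump template and captures every entry that
 *	matches the capture mask into the capture buffer; skipped or
 *	undersized entries are marked in their driver_flags.
 */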
4715 static int
4716 ql_parse_template(qla_host_t *ha)
4717 {
4718 	uint32_t num_of_entries, buff_level, e_cnt, esize;
4719 	uint32_t end_cnt, rv = 0;
4720 	char *dump_buff, *dbuff;
4721 	int sane_start = 0, sane_end = 0;
4722 	ql_minidump_template_hdr_t *template_hdr;
4723 	ql_minidump_entry_t *entry;
4724 	uint32_t capture_mask;
4725 	uint32_t dump_size;
4726 
4727 	/* Setup parameters */
4728 	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4729 
4730 	if (template_hdr->entry_type == TLHDR)
4731 		sane_start = 1;
4732 
4733 	dump_buff = (char *) ha->hw.mdump_buffer;
4734 
4735 	num_of_entries = template_hdr->num_of_entries;
4736 
4737 	entry = (ql_minidump_entry_t *) ((char *)template_hdr
4738 			+ template_hdr->first_entry_offset );
4739 
4740 	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4741 		template_hdr->ocm_window_array[ha->pci_func];
4742 	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4743 
4744 	capture_mask = ha->hw.mdump_capture_mask;
4745 	dump_size = ha->hw.mdump_buffer_size;
4746 
4747 	template_hdr->driver_capture_mask = capture_mask;
4748 
4749 	QL_DPRINT80(ha, (ha->pci_dev,
4750 		"%s: sane_start = %d num_of_entries = %d "
4751 		"capture_mask = 0x%x dump_size = %d \n",
4752 		__func__, sane_start, num_of_entries, capture_mask, dump_size));
4753 
4754 	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4755 
4756 		/*
4757 		 * If the entry's capture_mask does not match the requested
4758 		 * capture mask, mark it skipped in driver_flags and move on.
4759 		 */
4760 
4761 		if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4762 
4763 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4764 			entry = (ql_minidump_entry_t *) ((char *) entry
4765 					+ entry->hdr.entry_size);
4766 			continue;
4767 		}
4768 
4769 		/*
4770 		 * This is ONLY needed in implementations where the capture
4771 		 * buffer allocated is too small to capture all of the
4772 		 * required entries for a given capture mask.
4773 		 * We need to empty the buffer contents to a file,
4774 		 * if possible, before processing the next entry.
4775 		 * If the buff_full_flag is set, no further capture will happen
4776 		 * and all remaining non-control entries will be skipped.
4777 		 */
4778 		if (entry->hdr.entry_capture_size != 0) {
4779 			if ((buff_level + entry->hdr.entry_capture_size) >
4780 				dump_size) {
4781 				/*  Try to recover by emptying buffer to file */
4782 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4783 				entry = (ql_minidump_entry_t *) ((char *) entry
4784 						+ entry->hdr.entry_size);
4785 				continue;
4786 			}
4787 		}
4788 
4789 		/*
4790 		 * Decode the entry type and process it accordingly
4791 		 */
4792 
4793 		switch (entry->hdr.entry_type) {
4794 		case RDNOP:
4795 			break;
4796 
4797 		case RDEND:
4798 			if (sane_end == 0) {
4799 				end_cnt = e_cnt;
4800 			}
4801 			sane_end++;
4802 			break;
4803 
4804 		case RDCRB:
4805 			dbuff = dump_buff + buff_level;
4806 			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4807 			ql_entry_err_chk(entry, esize);
4808 			buff_level += esize;
4809 			break;
4810 
4811                 case POLLRD:
4812                         dbuff = dump_buff + buff_level;
4813                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4814                         ql_entry_err_chk(entry, esize);
4815                         buff_level += esize;
4816                         break;
4817 
4818                 case POLLRDMWR:
4819                         dbuff = dump_buff + buff_level;
4820                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4821 					(void *)dbuff);
4822                         ql_entry_err_chk(entry, esize);
4823                         buff_level += esize;
4824                         break;
4825 
4826 		case L2ITG:
4827 		case L2DTG:
4828 		case L2DAT:
4829 		case L2INS:
4830 			dbuff = dump_buff + buff_level;
4831 			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4832 			if (esize == -1) {
4833 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4834 			} else {
4835 				ql_entry_err_chk(entry, esize);
4836 				buff_level += esize;
4837 			}
4838 			break;
4839 
4840 		case L1DAT:
4841 		case L1INS:
4842 			dbuff = dump_buff + buff_level;
4843 			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4844 			ql_entry_err_chk(entry, esize);
4845 			buff_level += esize;
4846 			break;
4847 
4848 		case RDOCM:
4849 			dbuff = dump_buff + buff_level;
4850 			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4851 			ql_entry_err_chk(entry, esize);
4852 			buff_level += esize;
4853 			break;
4854 
4855 		case RDMEM:
4856 			dbuff = dump_buff + buff_level;
4857 			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4858 			ql_entry_err_chk(entry, esize);
4859 			buff_level += esize;
4860 			break;
4861 
4862 		case BOARD:
4863 		case RDROM:
4864 			dbuff = dump_buff + buff_level;
4865 			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4866 			ql_entry_err_chk(entry, esize);
4867 			buff_level += esize;
4868 			break;
4869 
4870 		case RDMUX:
4871 			dbuff = dump_buff + buff_level;
4872 			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4873 			ql_entry_err_chk(entry, esize);
4874 			buff_level += esize;
4875 			break;
4876 
4877                 case RDMUX2:
4878                         dbuff = dump_buff + buff_level;
4879                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4880                         ql_entry_err_chk(entry, esize);
4881                         buff_level += esize;
4882                         break;
4883 
4884 		case QUEUE:
4885 			dbuff = dump_buff + buff_level;
4886 			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4887 			ql_entry_err_chk(entry, esize);
4888 			buff_level += esize;
4889 			break;
4890 
4891 		case CNTRL:
4892 			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4893 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4894 			}
4895 			break;
4896 		default:
4897 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4898 			break;
4899 		}
4900 		/*  next entry in the template */
4901 		entry = (ql_minidump_entry_t *) ((char *) entry
4902 						+ entry->hdr.entry_size);
4903 	}
4904 
4905 	if (!sane_start || (sane_end > 1)) {
4906 		device_printf(ha->pci_dev,
4907 			"\n%s: Template configuration error. Check Template\n",
4908 			__func__);
4909 	}
4910 
4911 	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4912 		__func__, template_hdr->num_of_entries));
4913 
4914 	return 0;
4915 }
4916 
4917 /*
4918  * Read CRB operation.
4919  */
4920 static uint32_t
4921 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4922 	uint32_t * data_buff)
4923 {
4924 	int loop_cnt;
4925 	int ret;
4926 	uint32_t op_count, addr, stride, value = 0;
4927 
4928 	addr = crb_entry->addr;
4929 	op_count = crb_entry->op_count;
4930 	stride = crb_entry->addr_stride;
4931 
4932 	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4933 
4934 		ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4935 
4936 		if (ret)
4937 			return (0);
4938 
4939 		*data_buff++ = addr;
4940 		*data_buff++ = value;
4941 		addr = addr + stride;
4942 	}
4943 
4944 	/*
4945 	 * return the amount of data (in bytes) written to the buffer
4946 	 */
4947 	return (op_count * (2 * sizeof(uint32_t)));
4948 }
4949 
4950 /*
4951  * Handle L2 Cache.
4952  */
4953 
4954 static uint32_t
4955 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4956 	uint32_t * data_buff)
4957 {
4958 	int i, k;
4959 	int loop_cnt;
4960 	int ret;
4961 
4962 	uint32_t read_value;
4963 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4964 	uint32_t tag_value, read_cnt;
4965 	volatile uint8_t cntl_value_r;
4966 	long timeout;
4967 	uint32_t data;
4968 
4969 	loop_cnt = cacheEntry->op_count;
4970 
4971 	read_addr = cacheEntry->read_addr;
4972 	cntrl_addr = cacheEntry->control_addr;
4973 	cntl_value_w = (uint32_t) cacheEntry->write_value;
4974 
4975 	tag_reg_addr = cacheEntry->tag_reg_addr;
4976 
4977 	tag_value = cacheEntry->init_tag_value;
4978 	read_cnt = cacheEntry->read_addr_cnt;
4979 
4980 	for (i = 0; i < loop_cnt; i++) {
4981 
4982 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4983 		if (ret)
4984 			return (0);
4985 
4986 		if (cacheEntry->write_value != 0) {
4987 
4988 			ret = ql_rdwr_indreg32(ha, cntrl_addr,
4989 					&cntl_value_w, 0);
4990 			if (ret)
4991 				return (0);
4992 		}
4993 
4994 		if (cacheEntry->poll_mask != 0) {
4995 
4996 			timeout = cacheEntry->poll_wait;
4997 
4998 			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4999 			if (ret)
5000 				return (0);
5001 
5002 			cntl_value_r = (uint8_t)data;
5003 
5004 			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
5005 
5006 				if (timeout) {
5007 					qla_mdelay(__func__, 1);
5008 					timeout--;
5009 				} else
5010 					break;
5011 
5012 				ret = ql_rdwr_indreg32(ha, cntrl_addr,
5013 						&data, 1);
5014 				if (ret)
5015 					return (0);
5016 
5017 				cntl_value_r = (uint8_t)data;
5018 			}
5019 			if (!timeout) {
5020 				/*
5021 				 * Poll timed out: core dump capture failed.
5022 				 * Skip the remaining entries, write the
5023 				 * buffer out to a file and use the driver
5024 				 * specific fields in the template header
5025 				 * to report this error.
5026 				 */
5027 				return (-1);
5028 			}
5029 		}
5030 
5031 		addr = read_addr;
5032 		for (k = 0; k < read_cnt; k++) {
5033 
5034 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5035 			if (ret)
5036 				return (0);
5037 
5038 			*data_buff++ = read_value;
5039 			addr += cacheEntry->read_addr_stride;
5040 		}
5041 
5042 		tag_value += cacheEntry->tag_value_stride;
5043 	}
5044 
5045 	return (read_cnt * loop_cnt * sizeof(uint32_t));
5046 }
5047 
5048 /*
5049  * Handle L1 Cache.
5050  */
5051 
5052 static uint32_t
5053 ql_L1Cache(qla_host_t *ha,
5054 	ql_minidump_entry_cache_t *cacheEntry,
5055 	uint32_t *data_buff)
5056 {
5057 	int ret;
5058 	int i, k;
5059 	int loop_cnt;
5060 
5061 	uint32_t read_value;
5062 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
5063 	uint32_t tag_value, read_cnt;
5064 	uint32_t cntl_value_w;
5065 
5066 	loop_cnt = cacheEntry->op_count;
5067 
5068 	read_addr = cacheEntry->read_addr;
5069 	cntrl_addr = cacheEntry->control_addr;
5070 	cntl_value_w = (uint32_t) cacheEntry->write_value;
5071 
5072 	tag_reg_addr = cacheEntry->tag_reg_addr;
5073 
5074 	tag_value = cacheEntry->init_tag_value;
5075 	read_cnt = cacheEntry->read_addr_cnt;
5076 
5077 	for (i = 0; i < loop_cnt; i++) {
5078 
5079 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
5080 		if (ret)
5081 			return (0);
5082 
5083 		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
5084 		if (ret)
5085 			return (0);
5086 
5087 		addr = read_addr;
5088 		for (k = 0; k < read_cnt; k++) {
5089 
5090 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5091 			if (ret)
5092 				return (0);
5093 
5094 			*data_buff++ = read_value;
5095 			addr += cacheEntry->read_addr_stride;
5096 		}
5097 
5098 		tag_value += cacheEntry->tag_value_stride;
5099 	}
5100 
5101 	return (read_cnt * loop_cnt * sizeof(uint32_t));
5102 }
5103 
5104 /*
5105  * Reading OCM memory
5106  */
5107 
5108 static uint32_t
5109 ql_rdocm(qla_host_t *ha,
5110 	ql_minidump_entry_rdocm_t *ocmEntry,
5111 	uint32_t *data_buff)
5112 {
5113 	int i, loop_cnt;
5114 	volatile uint32_t addr;
5115 	volatile uint32_t value;
5116 
5117 	addr = ocmEntry->read_addr;
5118 	loop_cnt = ocmEntry->op_count;
5119 
5120 	for (i = 0; i < loop_cnt; i++) {
5121 		value = READ_REG32(ha, addr);
5122 		*data_buff++ = value;
5123 		addr += ocmEntry->read_addr_stride;
5124 	}
5125 	return (loop_cnt * sizeof(value));
5126 }
5127 
5128 /*
5129  * Read memory
5130  */
5131 
5132 static uint32_t
5133 ql_rdmem(qla_host_t *ha,
5134 	ql_minidump_entry_rdmem_t *mem_entry,
5135 	uint32_t *data_buff)
5136 {
5137 	int ret;
5138         int i, loop_cnt;
5139         volatile uint32_t addr;
5140 	q80_offchip_mem_val_t val;
5141 
5142         addr = mem_entry->read_addr;
5143 
5144 	/* size in bytes / 16 */
5145         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
5146 
5147         for (i = 0; i < loop_cnt; i++) {
5148 
5149 		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
5150 		if (ret)
5151 			return (0);
5152 
5153                 *data_buff++ = val.data_lo;
5154                 *data_buff++ = val.data_hi;
5155                 *data_buff++ = val.data_ulo;
5156                 *data_buff++ = val.data_uhi;
5157 
5158                 addr += (sizeof(uint32_t) * 4);
5159         }
5160 
5161         return (loop_cnt * (sizeof(uint32_t) * 4));
5162 }
5163 
5164 /*
5165  * Read Rom
5166  */
5167 
5168 static uint32_t
5169 ql_rdrom(qla_host_t *ha,
5170 	ql_minidump_entry_rdrom_t *romEntry,
5171 	uint32_t *data_buff)
5172 {
5173 	int ret;
5174 	int i, loop_cnt;
5175 	uint32_t addr;
5176 	uint32_t value;
5177 
5178 	addr = romEntry->read_addr;
5179 	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
5180 	loop_cnt /= sizeof(value);
5181 
5182 	for (i = 0; i < loop_cnt; i++) {
5183 
5184 		ret = ql_rd_flash32(ha, addr, &value);
5185 		if (ret)
5186 			return (0);
5187 
5188 		*data_buff++ = value;
5189 		addr += sizeof(value);
5190 	}
5191 
5192 	return (loop_cnt * sizeof(value));
5193 }
5194 
5195 /*
5196  * Read MUX data
5197  */
5198 
5199 static uint32_t
5200 ql_rdmux(qla_host_t *ha,
5201 	ql_minidump_entry_mux_t *muxEntry,
5202 	uint32_t *data_buff)
5203 {
5204 	int ret;
5205 	int loop_cnt;
5206 	uint32_t read_value, sel_value;
5207 	uint32_t read_addr, select_addr;
5208 
5209 	select_addr = muxEntry->select_addr;
5210 	sel_value = muxEntry->select_value;
5211 	read_addr = muxEntry->read_addr;
5212 
5213 	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
5214 
5215 		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
5216 		if (ret)
5217 			return (0);
5218 
5219 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5220 		if (ret)
5221 			return (0);
5222 
5223 		*data_buff++ = sel_value;
5224 		*data_buff++ = read_value;
5225 
5226 		sel_value += muxEntry->select_value_stride;
5227 	}
5228 
5229 	return (loop_cnt * (2 * sizeof(uint32_t)));
5230 }
5231 
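/*
 * Read MUX2 data (two select registers per iteration).
 */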
5232 static uint32_t
5233 ql_rdmux2(qla_host_t *ha,
5234 	ql_minidump_entry_mux2_t *muxEntry,
5235 	uint32_t *data_buff)
5236 {
5237 	int ret;
5238         int loop_cnt;
5239 
5240         uint32_t select_addr_1, select_addr_2;
5241         uint32_t select_value_1, select_value_2;
5242         uint32_t select_value_count, select_value_mask;
5243         uint32_t read_addr, read_value;
5244 
5245         select_addr_1 = muxEntry->select_addr_1;
5246         select_addr_2 = muxEntry->select_addr_2;
5247         select_value_1 = muxEntry->select_value_1;
5248         select_value_2 = muxEntry->select_value_2;
5249         select_value_count = muxEntry->select_value_count;
5250         select_value_mask  = muxEntry->select_value_mask;
5251 
5252         read_addr = muxEntry->read_addr;
5253 
5254         for (loop_cnt = 0; loop_cnt < select_value_count; loop_cnt++) {
5256 
5257                 uint32_t temp_sel_val;
5258 
5259 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5260 		if (ret)
5261 			return (0);
5262 
5263                 temp_sel_val = select_value_1 & select_value_mask;
5264 
5265 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5266 		if (ret)
5267 			return (0);
5268 
5269 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5270 		if (ret)
5271 			return (0);
5272 
5273                 *data_buff++ = temp_sel_val;
5274                 *data_buff++ = read_value;
5275 
5276 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5277 		if (ret)
5278 			return (0);
5279 
5280                 temp_sel_val = select_value_2 & select_value_mask;
5281 
5282 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5283 		if (ret)
5284 			return (0);
5285 
5286 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5287 		if (ret)
5288 			return (0);
5289 
5290                 *data_buff++ = temp_sel_val;
5291                 *data_buff++ = read_value;
5292 
5293                 select_value_1 += muxEntry->select_value_stride;
5294                 select_value_2 += muxEntry->select_value_stride;
5295         }
5296 
5297         return (loop_cnt * (4 * sizeof(uint32_t)));
5298 }
5299 
5300 /*
5301  * Handling Queue State Reads.
5302  */
5303 
5304 static uint32_t
5305 ql_rdqueue(qla_host_t *ha,
5306 	ql_minidump_entry_queue_t *queueEntry,
5307 	uint32_t *data_buff)
5308 {
5309 	int ret;
5310 	int loop_cnt, k;
5311 	uint32_t read_value;
5312 	uint32_t read_addr, read_stride, select_addr;
5313 	uint32_t queue_id, read_cnt;
5314 
5315 	read_cnt = queueEntry->read_addr_cnt;
5316 	read_stride = queueEntry->read_addr_stride;
5317 	select_addr = queueEntry->select_addr;
5318 
5319 	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5320 		loop_cnt++) {
5321 
5322 		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5323 		if (ret)
5324 			return (0);
5325 
5326 		read_addr = queueEntry->read_addr;
5327 
5328 		for (k = 0; k < read_cnt; k++) {
5329 
5330 			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5331 			if (ret)
5332 				return (0);
5333 
5334 			*data_buff++ = read_value;
5335 			read_addr += read_stride;
5336 		}
5337 
5338 		queue_id += queueEntry->queue_id_stride;
5339 	}
5340 
5341 	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5342 }
5343 
5344 /*
5345  * Handling control entries.
5346  */
5347 
5348 static uint32_t
5349 ql_cntrl(qla_host_t *ha,
5350 	ql_minidump_template_hdr_t *template_hdr,
5351 	ql_minidump_entry_cntrl_t *crbEntry)
5352 {
5353 	int ret;
5354 	int count;
5355 	uint32_t opcode, read_value, addr, entry_addr;
5356 	long timeout;
5357 
5358 	entry_addr = crbEntry->addr;
5359 
5360 	for (count = 0; count < crbEntry->op_count; count++) {
5361 		opcode = crbEntry->opcode;
5362 
5363 		if (opcode & QL_DBG_OPCODE_WR) {
5364 
5365                 	ret = ql_rdwr_indreg32(ha, entry_addr,
5366 					&crbEntry->value_1, 0);
5367 			if (ret)
5368 				return (0);
5369 
5370 			opcode &= ~QL_DBG_OPCODE_WR;
5371 		}
5372 
5373 		if (opcode & QL_DBG_OPCODE_RW) {
5374 
5375                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5376 			if (ret)
5377 				return (0);
5378 
5379                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5380 			if (ret)
5381 				return (0);
5382 
5383 			opcode &= ~QL_DBG_OPCODE_RW;
5384 		}
5385 
5386 		if (opcode & QL_DBG_OPCODE_AND) {
5387 
5388                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5389 			if (ret)
5390 				return (0);
5391 
5392 			read_value &= crbEntry->value_2;
5393 			opcode &= ~QL_DBG_OPCODE_AND;
5394 
5395 			if (opcode & QL_DBG_OPCODE_OR) {
5396 				read_value |= crbEntry->value_3;
5397 				opcode &= ~QL_DBG_OPCODE_OR;
5398 			}
5399 
5400                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5401 			if (ret)
5402 				return (0);
5403 		}
5404 
5405 		if (opcode & QL_DBG_OPCODE_OR) {
5406 
5407                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5408 			if (ret)
5409 				return (0);
5410 
5411 			read_value |= crbEntry->value_3;
5412 
5413                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5414 			if (ret)
5415 				return (0);
5416 
5417 			opcode &= ~QL_DBG_OPCODE_OR;
5418 		}
5419 
5420 		if (opcode & QL_DBG_OPCODE_POLL) {
5421 
5422 			opcode &= ~QL_DBG_OPCODE_POLL;
5423 			timeout = crbEntry->poll_timeout;
5424 			addr = entry_addr;
5425 
5426                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5427 			if (ret)
5428 				return (0);
5429 
5430 			while ((read_value & crbEntry->value_2)
5431 				!= crbEntry->value_1) {
5432 
5433 				if (timeout) {
5434 					qla_mdelay(__func__, 1);
5435 					timeout--;
5436 				} else
5437 					break;
5438 
5439                 		ret = ql_rdwr_indreg32(ha, addr,
5440 						&read_value, 1);
5441 				if (ret)
5442 					return (0);
5443 			}
5444 
5445 			if (!timeout) {
5446 				/*
5447 				 * Poll timed out:
5448 				 * core dump capture failed.
5449 				 * Skip the remaining entries, write the
5450 				 * buffer out to a file and use the driver
5451 				 * specific fields in the template header
5452 				 * to report this error.
5453 				 */
5454 				return (-1);
5455 			}
5456 		}
5457 
5458 		if (opcode & QL_DBG_OPCODE_RDSTATE) {
5459 			/*
5460 			 * decide which address to use.
5461 			 */
5462 			if (crbEntry->state_index_a) {
5463 				addr = template_hdr->saved_state_array[
5464 						crbEntry-> state_index_a];
5465 			} else {
5466 				addr = entry_addr;
5467 			}
5468 
5469                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5470 			if (ret)
5471 				return (0);
5472 
5473 			template_hdr->saved_state_array[crbEntry->state_index_v]
5474 					= read_value;
5475 			opcode &= ~QL_DBG_OPCODE_RDSTATE;
5476 		}
5477 
5478 		if (opcode & QL_DBG_OPCODE_WRSTATE) {
5479 			/* Decide which value to use. */
5482 			if (crbEntry->state_index_v) {
5483 				read_value = template_hdr->saved_state_array[
5484 						crbEntry->state_index_v];
5485 			} else {
5486 				read_value = crbEntry->value_1;
5487 			}
5488 			/* Decide which address to use. */
5491 			if (crbEntry->state_index_a) {
5492 				addr = template_hdr->saved_state_array[
5493 						crbEntry->state_index_a];
5494 			} else {
5495 				addr = entry_addr;
5496 			}
5497 
5498 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5499 			if (ret)
5500 				return (0);
5501 
5502 			opcode &= ~QL_DBG_OPCODE_WRSTATE;
5503 		}
5504 
5505 		if (opcode & QL_DBG_OPCODE_MDSTATE) {
5506 			/*  Read value from saved state using index */
5507 			read_value = template_hdr->saved_state_array[
5508 						crbEntry->state_index_v];
5509 
5510 			read_value <<= crbEntry->shl; /* Shift left operation */
5511 			read_value >>= crbEntry->shr; /* Shift right operation */
5512 
5513 			if (crbEntry->value_2) {
5514 				/* check if AND mask is provided */
5515 				read_value &= crbEntry->value_2;
5516 			}
5517 
5518 			read_value |= crbEntry->value_3; /* OR operation */
5519 			read_value += crbEntry->value_1; /* increment op */
5520 
5521 			/* Write value back to state area. */
5522 
5523 			template_hdr->saved_state_array[crbEntry->state_index_v]
5524 					= read_value;
5525 			opcode &= ~QL_DBG_OPCODE_MDSTATE;
5526 		}
5527 
5528 		entry_addr += crbEntry->addr_stride;
5529 	}
5530 
5531 	return (0);
5532 }
5533 
5534 /*
5535  * Handling rd poll entries (select, poll for ready, read).
5536  */
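
/*
 * Illustrative (hypothetical) rd poll entry: each operation writes
 * select_value to select_addr, polls select_addr until a bit in mask
 * asserts, reads the data from read_addr, and emits a
 * (select_value, data) dword pair; the caller's buffer must therefore
 * hold op_count * 2 dwords.
 *
 *	entry->select_addr         = <selector register>;
 *	entry->select_value        = <first unit to select>;
 *	entry->select_value_stride = <per operation increment>;
 *	entry->read_addr           = <data register>;
 *	entry->mask                = <ready bit(s)>;
 *	entry->poll                = <maximum poll iterations>;
 *	entry->op_count            = <number of select/read operations>;
 */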
5537 
5538 static uint32_t
5539 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5540 	uint32_t *data_buff)
5541 {
5542         int ret;
5543         int loop_cnt;
5544         uint32_t op_count, select_addr, select_value_stride, select_value;
5545         uint32_t read_addr, poll, mask, data_size, data;
5546         uint32_t wait_count = 0;
5547 
5548         select_addr            = entry->select_addr;
5549         read_addr              = entry->read_addr;
5550         select_value           = entry->select_value;
5551         select_value_stride    = entry->select_value_stride;
5552         op_count               = entry->op_count;
5553         poll                   = entry->poll;
5554         mask                   = entry->mask;
5555         data_size              = entry->data_size;
5556 
5557         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5558 
5559                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5560 		if (ret)
5561 			return (0);
5562 
5563                 wait_count = 0;
5564 
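                /* Wait for the ready bit(s) in mask to assert. */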
5565                 while (wait_count < poll) {
5566 
5567                         uint32_t temp;
5568 
5569 			ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5570 			if (ret)
5571 				return (0);
5572 
5573                         if ( (temp & mask) != 0 ) {
5574                                 break;
5575                         }
5576                         wait_count++;
5577                 }
5578 
5579                 if (wait_count == poll) {
5580                         device_printf(ha->pci_dev,
5581 				"%s: Error in processing entry\n", __func__);
5582                         device_printf(ha->pci_dev,
5583 				"%s: wait_count <0x%x> poll <0x%x>\n",
5584 				__func__, wait_count, poll);
5585                         return (0);
5586                 }
5587 
5588 		ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5589 		if (ret)
5590 			return (0);
5591 
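                /* Emit the (select_value, data) pair for this unit. */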
5592                 *data_buff++ = select_value;
5593                 *data_buff++ = data;
5594                 select_value = select_value + select_value_stride;
5595         }
5596 
5597         /*
5598          * Return the amount of data written to the buffer, in bytes.
5599          */
5600         return (loop_cnt * (2 * sizeof(uint32_t)));
5601 }
5602 
5604 /*
5605  * Handling rd modify write poll entries.
5606  */
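
/*
 * The sequence performed below: write value_1 to addr_1, poll addr_1
 * for the bits in mask, read addr_2, AND the data with modify_mask and
 * write it back to addr_2, write value_2 to addr_1, poll addr_1 again,
 * then emit the (addr_2, data) dword pair.
 */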
5607 
5608 static uint32_t
5609 ql_pollrd_modify_write(qla_host_t *ha,
5610 	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5611 	uint32_t *data_buff)
5612 {
5613 	int ret;
5614         uint32_t addr_1, addr_2, value_1, value_2, data;
5615         uint32_t poll, mask, data_size, modify_mask;
5616         uint32_t wait_count = 0;
5617 
5618         addr_1		= entry->addr_1;
5619         addr_2		= entry->addr_2;
5620         value_1		= entry->value_1;
5621         value_2		= entry->value_2;
5622 
5623         poll		= entry->poll;
5624         mask		= entry->mask;
5625         modify_mask	= entry->modify_mask;
5626         data_size	= entry->data_size;
5627 
5629 	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5630 	if (ret)
5631 		return (0);
5632 
5633         wait_count = 0;
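        /* Wait for the handshake bit(s) in mask to assert. */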
5634         while (wait_count < poll) {
5635 
5636 		uint32_t temp;
5637 
5638 		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5639 		if (ret)
5640 			return (0);
5641 
5642                 if ( (temp & mask) != 0 ) {
5643                         break;
5644                 }
5645                 wait_count++;
5646         }
5647 
5648         if (wait_count == poll) {
5649                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5650 			__func__);
                /* Nothing was captured; return zero bytes as other error paths do. */
                return (0);
5651         } else {
5652 
5653 		ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5654 		if (ret)
5655 			return (0);
5656 
5657                 data = (data & modify_mask);
5658 
5659 		ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5660 		if (ret)
5661 			return (0);
5662 
5663 		ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5664 		if (ret)
5665 			return (0);
5666 
5667                 /* Poll again */
5668                 wait_count = 0;
5669                 while (wait_count < poll) {
5670 
5671                         uint32_t temp;
5672 
5673 			ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5674 			if (ret)
5675 				return (0);
5676 
5677                         if ( (temp & mask) != 0 ) {
5678                                 break;
5679                         }
5680                         wait_count++;
5681                 }
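                /* Record the (addr_2, data) pair that was captured. */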
5682                 *data_buff++ = addr_2;
5683                 *data_buff++ = data;
5684         }
5685 
5686         /*
5687          * Return the amount of data written to the buffer, in bytes.
5688          */
5689         return (2 * sizeof(uint32_t));
5690 }
5691 
5692 
5693