xref: /freebsd/sys/dev/qlnx/qlnxe/qlnx_ioctl.c (revision f56f82e0)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 
29 /*
30  * File: qlnx_ioctl.c
31  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "qlnx_os.h"
38 #include "bcm_osal.h"
39 
40 #include "reg_addr.h"
41 #include "ecore_gtt_reg_addr.h"
42 #include "ecore.h"
43 #include "ecore_chain.h"
44 #include "ecore_status.h"
45 #include "ecore_hw.h"
46 #include "ecore_rt_defs.h"
47 #include "ecore_init_ops.h"
48 #include "ecore_int.h"
49 #include "ecore_cxt.h"
50 #include "ecore_spq.h"
51 #include "ecore_init_fw_funcs.h"
52 #include "ecore_sp_commands.h"
53 #include "ecore_dev_api.h"
54 #include "ecore_l2_api.h"
55 #include "ecore_mcp.h"
56 #include "ecore_hw_defs.h"
57 #include "mcp_public.h"
58 #include "ecore_iro.h"
59 #include "nvm_cfg.h"
60 #include "ecore_dev_api.h"
61 #include "ecore_dbg_fw_funcs.h"
62 
63 #include "qlnx_ioctl.h"
64 #include "qlnx_def.h"
65 #include "qlnx_ver.h"
66 #include <sys/smp.h>
67 
68 
/* Forward declaration for the ioctl entry point registered below. */
static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
                struct thread *td);

/*
 * Character device switch table for the per-interface control node.
 * Only an ioctl handler is provided; the node itself is created by
 * qlnx_make_cdev() and destroyed by qlnx_del_cdev().
 */
static struct cdevsw qlnx_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = qlnx_eioctl,
        .d_name = "qlnxioctl",
};
77 
78 int
79 qlnx_make_cdev(qlnx_host_t *ha)
80 {
81 	ha->ioctl_dev = make_dev(&qlnx_cdevsw,
82 				ha->ifp->if_dunit,
83 				UID_ROOT,
84 				GID_WHEEL,
85 				0600,
86 				"%s",
87 				if_name(ha->ifp));
88 
89 	if (ha->ioctl_dev == NULL)
90 		return (-1);
91 
92 	ha->ioctl_dev->si_drv1 = ha;
93 
94 	return (0);
95 }
96 
97 void
98 qlnx_del_cdev(qlnx_host_t *ha)
99 {
100 	if (ha->ioctl_dev != NULL)
101 		destroy_dev(ha->ioctl_dev);
102 	return;
103 }
104 
/*
 * Capture the GRC register dump for HW function hwfn_index into the
 * pre-allocated ha->grcdump[hwfn_index] buffer.
 *
 * Returns 0 on success (including when a previously-taken dump is still
 * cached), EINVAL if a PTT window cannot be acquired, or the non-zero
 * DBG_STATUS_* value from the ecore debug layer if the dump fails.
 * On success *num_dumped_dwords holds the dump length in dwords.
 */
int
qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->grcdump_dwords[hwfn_index]) {
		/* the grcdump is already available */
		*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
		return (0);
	}

	/* Register the driver's version with the debug firmware layer. */
	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	/* Acquire a PTT window for register access during the dump. */
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
		return (rval);
	}

	/* Buffer size is in bytes; the ecore API takes dwords (>> 2). */
	if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
			ha->grcdump[hwfn_index],
			(ha->grcdump_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
	 	rval = 0;
		ha->grcdump_taken = 1;
	} else
		QL_DPRINT1(ha,"ecore_dbg_grc_dump failed [%d, 0x%x]\n",
			   hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
142 
143 static void
144 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
145 {
146 	int i;
147 
148 	grcdump->pci_func = ha->pci_func;
149 
150 	for (i = 0; i < ha->cdev.num_hwfns; i++)
151 		grcdump->grcdump_size[i] = ha->grcdump_size[i];
152 
153 	return;
154 }
155 
156 static int
157 qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
158 {
159 	int		i;
160 	int		rval = 0;
161 	uint32_t	dwords = 0;
162 
163 	grcdump->pci_func = ha->pci_func;
164 
165 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
166 
167 		if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
168 			(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
169 			return (EINVAL);
170 
171 		rval = qlnx_grc_dump(ha, &dwords, i);
172 
173 		if (rval)
174 			break;
175 
176 		grcdump->grcdump_dwords[i] = dwords;
177 
178 		QL_DPRINT1(ha,"grcdump_dwords[%d] = 0x%x\n", i, dwords);
179 
180 		rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
181 				ha->grcdump_size[i]);
182 
183 		if (rval)
184 			break;
185 
186 		ha->grcdump_dwords[i] = 0;
187 	}
188 
189 	ha->grcdump_taken = 0;
190 
191 	return (rval);
192 }
193 
/*
 * Capture the idle-check dump for HW function hwfn_index into the
 * pre-allocated ha->idle_chk[hwfn_index] buffer.  Mirrors
 * qlnx_grc_dump(): returns 0 on success (or when a cached dump is
 * available), EINVAL if no PTT window can be acquired, or the non-zero
 * DBG_STATUS_* value from the ecore debug layer on failure.
 */
int
qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->idle_chk_dwords[hwfn_index]) {
		/* the idle check is already available */
		*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
		return (0);
	}

	/* Register the driver's version with the debug firmware layer. */
	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	/* Acquire a PTT window for register access during the dump. */
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
		return (rval);
	}

	/* Buffer size is in bytes; the ecore API takes dwords (>> 2). */
	if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
			ha->idle_chk[hwfn_index],
			(ha->idle_chk_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
	 	rval = 0;
		ha->idle_chk_taken = 1;
	} else
		QL_DPRINT1(ha,"ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
			   hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
231 
232 static void
233 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
234 {
235 	int i;
236 
237 	idle_chk->pci_func = ha->pci_func;
238 
239 	for (i = 0; i < ha->cdev.num_hwfns; i++)
240 		idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
241 
242 	return;
243 }
244 
245 static int
246 qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
247 {
248 	int		i;
249 	int		rval = 0;
250 	uint32_t	dwords = 0;
251 
252 	idle_chk->pci_func = ha->pci_func;
253 
254 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
255 
256 		if ((ha->idle_chk[i] == NULL) ||
257 				(idle_chk->idle_chk[i] == NULL) ||
258 				(idle_chk->idle_chk_size[i] <
259 					ha->idle_chk_size[i]))
260 			return (EINVAL);
261 
262 		rval = qlnx_idle_chk(ha, &dwords, i);
263 
264 		if (rval)
265 			break;
266 
267 		idle_chk->idle_chk_dwords[i] = dwords;
268 
269 		QL_DPRINT1(ha,"idle_chk_dwords[%d] = 0x%x\n", i, dwords);
270 
271                	rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
272 				ha->idle_chk_size[i]);
273 
274 		if (rval)
275 			break;
276 
277 		ha->idle_chk_dwords[i] = 0;
278 	}
279 	ha->idle_chk_taken = 0;
280 
281 	return (rval);
282 }
283 
/*
 * Query the ecore debug layer for the buffer size (in bytes) needed to
 * hold the dump produced by the given trace command on one HW function.
 * Returns 0 on any failure (PTT acquire failure, ecore error, or an
 * unrecognized cmd — rval stays -1 and is treated as a failure below).
 */
static uint32_t
qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
{
        int rval = -1;
        struct ecore_hwfn *p_hwfn;
        struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;

        p_hwfn = &ha->cdev.hwfns[hwfn_index];
        p_ptt = ecore_ptt_acquire(p_hwfn);

        if (!p_ptt) {
                QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
                           hwfn_index, cmd);
                return (0);
        }

	/* Dispatch to the size query matching the requested trace kind. */
	switch (cmd) {

	case QLNX_MCP_TRACE:
        	rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_REG_FIFO:
        	rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_IGU_FIFO:
        	rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
        	rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_FW_ASSERTS:
        	rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;
	}

        if (rval != DBG_STATUS_OK) {
                QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", cmd, rval);
		num_dwords = 0;
        }

        ecore_ptt_release(p_hwfn, p_ptt);

	/* Convert the dword count to bytes for the caller. */
        return ((num_dwords * sizeof (uint32_t)));
}
338 
339 static void
340 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
341 {
342 	int i;
343 
344 	trace->pci_func = ha->pci_func;
345 
346 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
347 		trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
348 	}
349 
350 	return;
351 }
352 
353 static int
354 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
355 {
356         int rval = -1;
357         struct ecore_hwfn *p_hwfn;
358         struct ecore_ptt *p_ptt;
359 	uint32_t num_dwords = 0;
360 	void *buffer;
361 
362 	buffer = qlnx_zalloc(trace->size[hwfn_index]);
363 	if (buffer == NULL) {
364                 QL_DPRINT1(ha,"qlnx_zalloc [%d, 0x%x]failed\n",
365                            hwfn_index, trace->cmd);
366                 return (ENXIO);
367 	}
368 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
369 
370         p_hwfn = &ha->cdev.hwfns[hwfn_index];
371         p_ptt = ecore_ptt_acquire(p_hwfn);
372 
373         if (!p_ptt) {
374                 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
375                            hwfn_index, trace->cmd);
376                 return (ENXIO);
377         }
378 
379 	switch (trace->cmd) {
380 
381 	case QLNX_MCP_TRACE:
382         	rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
383 				buffer, (trace->size[hwfn_index] >> 2),
384 				&num_dwords);
385 		break;
386 
387 	case QLNX_REG_FIFO:
388         	rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
389 				buffer, (trace->size[hwfn_index] >> 2),
390 				&num_dwords);
391 		break;
392 
393 	case QLNX_IGU_FIFO:
394         	rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
395 				buffer, (trace->size[hwfn_index] >> 2),
396 				&num_dwords);
397 		break;
398 
399 	case QLNX_PROTECTION_OVERRIDE:
400         	rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
401 				buffer, (trace->size[hwfn_index] >> 2),
402 				&num_dwords);
403 		break;
404 
405 	case QLNX_FW_ASSERTS:
406         	rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
407 				buffer, (trace->size[hwfn_index] >> 2),
408 				&num_dwords);
409 		break;
410 	}
411 
412         if (rval != DBG_STATUS_OK) {
413                 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
414 		num_dwords = 0;
415         }
416 
417         ecore_ptt_release(p_hwfn, p_ptt);
418 
419 	trace->dwords[hwfn_index] = num_dwords;
420 
421 	if (num_dwords) {
422                	rval = copyout(buffer, trace->buffer[hwfn_index],
423 				(num_dwords << 2));
424 	}
425 
426         return (rval);
427 }
428 
429 static int
430 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
431 {
432 	int			rval = 0;
433 	struct ecore_hwfn	*p_hwfn;
434 
435 	if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
436 		return (EINVAL);
437 	}
438 
439 	p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
440 
441 	switch (reg_rd_wr->cmd) {
442 
443 		case QLNX_REG_READ_CMD:
444 			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
445 				reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
446 							reg_rd_wr->addr);
447 			}
448 			break;
449 
450 		case QLNX_REG_WRITE_CMD:
451 			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
452 				qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
453 					reg_rd_wr->val);
454 			}
455 			break;
456 
457 		default:
458 			rval = EINVAL;
459 			break;
460 	}
461 
462 	return (rval);
463 }
464 
465 static int
466 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
467 {
468 	int rval = 0;
469 
470 	switch (pci_cfg_rd_wr->cmd) {
471 
472 		case QLNX_PCICFG_READ:
473 			pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
474 						pci_cfg_rd_wr->reg,
475 						pci_cfg_rd_wr->width);
476 			break;
477 
478 		case QLNX_PCICFG_WRITE:
479 			pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
480 				pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
481 			break;
482 
483 		default:
484 			rval = EINVAL;
485 			break;
486 	}
487 
488 	return (rval);
489 }
490 
491 static void
492 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
493 {
494 	bzero(mac_addr->addr, sizeof(mac_addr->addr));
495 	snprintf(mac_addr->addr, sizeof(mac_addr->addr),
496 		"%02x:%02x:%02x:%02x:%02x:%02x",
497 		ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
498 		ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
499 
500 	return;
501 }
502 
503 static int
504 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
505 {
506 	int		i;
507 	int		rval = 0;
508 	uint32_t	dwords = 0;
509 	uint8_t		*outb;
510 
511 	regs->reg_buf_len = 0;
512 	outb = regs->reg_buf;
513 
514 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
515 
516 		rval = qlnx_grc_dump(ha, &dwords, i);
517 
518 		if (rval)
519 			break;
520 
521 		regs->reg_buf_len += (dwords << 2);
522 
523 		rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
524 
525 		if (rval)
526 			break;
527 
528 		ha->grcdump_dwords[i] = 0;
529 		outb += regs->reg_buf_len;
530 	}
531 
532 	ha->grcdump_taken = 0;
533 
534 	return (rval);
535 }
536 
537 static int
538 qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
539 {
540 	int i;
541 	extern char qlnx_name_str[];
542 	extern char qlnx_ver_str[];
543 
544 	bzero(drv_info, sizeof(qlnx_drvinfo_t));
545 
546 	snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
547 		qlnx_name_str);
548 	snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
549 		qlnx_ver_str);
550 	snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
551 		ha->mfw_ver);
552 	snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
553 		"%s", ha->stormfw_ver);
554 
555 	drv_info->eeprom_dump_len = ha->flash_size;
556 
557 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
558 		drv_info->reg_dump_len += ha->grcdump_size[i];
559 	}
560 
561 	snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
562 		"%d:%d:%d", pci_get_bus(ha->pci_dev),
563 		pci_get_slot(ha->pci_dev), ha->pci_func);
564 
565 	return (0);
566 }
567 
568 static int
569 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
570 {
571 	struct ecore_hwfn *p_hwfn;
572 	struct qlnx_link_output if_link;
573 
574 	p_hwfn = &ha->cdev.hwfns[0];
575 
576 	qlnx_fill_link(p_hwfn, &if_link);
577 
578 	dev_info->supported = if_link.supported_caps;
579 	dev_info->advertising = if_link.advertised_caps;
580 	dev_info->speed = if_link.speed;
581 	dev_info->duplex = if_link.duplex;
582 	dev_info->port = ha->pci_func & 0x1;
583 	dev_info->autoneg = if_link.autoneg;
584 
585 	return (0);
586 }
587 
/*
 * Copy userland data into a kernel buffer and write it to NVRAM via
 * the management firmware using the given cmd (ECORE_NVM_WRITE_NVRAM
 * or ECORE_PUT_FILE_DATA).  Returns EINVAL for a missing/empty user
 * buffer, otherwise the copyin or ecore_mcp_nvm_write status.
 */
static int
qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	/* NOTE(review): qlnx_zalloc() result is not NULL-checked here;
	 * presumably it sleeps (M_WAITOK) and cannot fail — confirm. */
	buf = qlnx_zalloc(nvram->data_len);

	ret = copyin(nvram->data, buf, nvram->data_len);

	QL_DPRINT9(ha, "issue cmd = 0x%x data = %p \
		 data_len = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ret);

	/* Only touch the NVRAM if the user data copied in cleanly. */
	if (ret == 0) {
		ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
			nvram->offset, buf, nvram->data_len);
	}

	QL_DPRINT9(ha, "cmd = 0x%x data = %p \
		 data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	free(buf, M_QLNXBUF);

	return (ret);
}
618 
619 static int
620 qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
621 {
622 	uint8_t *buf;
623 	int ret = 0;
624 
625 	if ((nvram->data == NULL) || (nvram->data_len == 0))
626 		return (EINVAL);
627 
628 	buf = qlnx_zalloc(nvram->data_len);
629 
630 	ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
631 		nvram->data_len);
632 
633 	QL_DPRINT9(ha, " data = %p data_len = 0x%x \
634 		 resp = 0x%x ret = 0x%x exit\n",
635 		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
636 
637 	if (ret == 0) {
638 		ret = copyout(buf, nvram->data, nvram->data_len);
639 	}
640 
641 	free(buf, M_QLNXBUF);
642 
643 	return (ret);
644 }
645 
646 static int
647 qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
648 {
649 	uint8_t *buf;
650 	int ret = 0;
651 
652 	if ((nvram->data == NULL) || (nvram->data_len == 0))
653 		return (EINVAL);
654 
655 	buf = qlnx_zalloc(nvram->data_len);
656 
657 
658 	ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
659 
660 	QL_DPRINT9(ha, "data = %p data_len = 0x%x \
661 		 resp = 0x%x ret = 0x%x exit\n",
662 		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
663 
664 	if (ret == 0) {
665 		ret = copyout(buf, nvram->data, nvram->data_len);
666 	}
667 
668 	free(buf, M_QLNXBUF);
669 
670 	return (ret);
671 }
672 
/*
 * Dispatch an NVRAM ioctl sub-command (nvram->cmd) to the matching
 * write/read/management-firmware operation.  Returns EINVAL for an
 * unknown sub-command, otherwise the status of the operation invoked.
 */
static int
qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	int ret = 0;

	switch (nvram->cmd) {

	case QLNX_NVRAM_CMD_WRITE_NVRAM:
		ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_DATA:
		ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
		break;

	case QLNX_NVRAM_CMD_READ_NVRAM:
		ret = qlnx_read_nvram(ha, nvram);
		break;

	/* The remaining commands go straight to the management firmware;
	 * nvram->offset carries the command argument for each of them. */
	case QLNX_NVRAM_CMD_SET_SECURE_MODE:
		ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE \
			 resp = 0x%x ret = 0x%x exit\n",
			 ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_DEL_FILE:
		ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE \
			 resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
		ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN \
			 resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
		ret = qlnx_get_nvram_resp(ha, nvram);
		break;

	default:
		ret = EINVAL;
		break;
	}

	return (ret);
}
727 
728 static void
729 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
730 {
731 	int i;
732 	int index;
733 	int ret;
734 	int stats_copied = 0;
735 
736 	s_stats->num_hwfns = ha->cdev.num_hwfns;
737 
738 //	if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
739 //		return;
740 
741 	s_stats->num_samples = ha->storm_stats_index;
742 
743 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
744 
745 		index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
746 
747 		if (s_stats->buffer[i]) {
748 
749 			ret = copyout(&ha->storm_stats[index],
750 					s_stats->buffer[i],
751 					QLNX_STORM_STATS_BYTES_PER_HWFN);
752 			if (ret) {
753 				printf("%s [%d]: failed\n", __func__, i);
754 			}
755 
756 			if (s_stats->num_samples ==
757 				QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
758 
759 				bzero((void *)&ha->storm_stats[i],
760 					QLNX_STORM_STATS_BYTES_PER_HWFN);
761 
762 				stats_copied = 1;
763 			}
764 		}
765 	}
766 
767 	if (stats_copied)
768 		ha->storm_stats_index = 0;
769 
770 	return;
771 }
772 
773 
774 static int
775 qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
776 	struct thread *td)
777 {
778 	qlnx_host_t	*ha;
779 	int		rval = 0;
780 	struct ifnet	*ifp;
781 	qlnx_trace_t	*trace;
782 	int		i;
783 
784 	if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
785 		return ENXIO;
786 
787 	ifp = ha->ifp;
788 
789 	switch (cmd) {
790 
791 	case QLNX_GRC_DUMP_SIZE:
792 		qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
793 		break;
794 
795 	case QLNX_GRC_DUMP:
796 		rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
797 		break;
798 
799 	case QLNX_IDLE_CHK_SIZE:
800 		qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
801 		break;
802 
803 	case QLNX_IDLE_CHK:
804 		rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
805 		break;
806 
807 	case QLNX_DRV_INFO:
808 		rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
809 		break;
810 
811 	case QLNX_DEV_SETTING:
812 		rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
813 		break;
814 
815 	case QLNX_GET_REGS:
816 		rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
817 		break;
818 
819 	case QLNX_NVRAM:
820 		rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
821 		break;
822 
823 	case QLNX_RD_WR_REG:
824 		rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
825 		break;
826 
827 	case QLNX_RD_WR_PCICFG:
828 		rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
829 		break;
830 
831 	case QLNX_MAC_ADDR:
832 		qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
833 		break;
834 
835 	case QLNX_STORM_STATS:
836 		qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
837 		break;
838 
839 	case QLNX_TRACE_SIZE:
840 		qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
841 		break;
842 
843 	case QLNX_TRACE:
844 		trace = (qlnx_trace_t *)data;
845 
846 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
847 
848 			if (trace->size[i] && trace->cmd && trace->buffer[i])
849 				rval = qlnx_get_trace(ha, i, trace);
850 
851 			if (rval)
852 				break;
853 		}
854 		break;
855 
856 	default:
857 		rval = EINVAL;
858 		break;
859 	}
860 
861 	return (rval);
862 }
863 
864