xref: /freebsd/sys/dev/qlnx/qlnxe/qlnx_ioctl.c (revision 85732ac8)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 
29 /*
30  * File: qlnx_ioctl.c
31  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "qlnx_os.h"
38 #include "bcm_osal.h"
39 
40 #include "reg_addr.h"
41 #include "ecore_gtt_reg_addr.h"
42 #include "ecore.h"
43 #include "ecore_chain.h"
44 #include "ecore_status.h"
45 #include "ecore_hw.h"
46 #include "ecore_rt_defs.h"
47 #include "ecore_init_ops.h"
48 #include "ecore_int.h"
49 #include "ecore_cxt.h"
50 #include "ecore_spq.h"
51 #include "ecore_init_fw_funcs.h"
52 #include "ecore_sp_commands.h"
53 #include "ecore_dev_api.h"
54 #include "ecore_l2_api.h"
55 #include "ecore_mcp.h"
56 #include "ecore_hw_defs.h"
57 #include "mcp_public.h"
58 #include "ecore_iro.h"
59 #include "nvm_cfg.h"
60 #include "ecore_dev_api.h"
61 #include "ecore_dbg_fw_funcs.h"
62 #include "ecore_dcbx_api.h"
63 
64 #include "qlnx_ioctl.h"
65 #include "qlnx_def.h"
66 #include "qlnx_ver.h"
67 #include <sys/smp.h>
68 
69 
70 static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
71                 struct thread *td);
72 
73 static struct cdevsw qlnx_cdevsw = {
74         .d_version = D_VERSION,
75         .d_ioctl = qlnx_eioctl,
76         .d_name = "qlnxioctl",
77 };
78 
79 int
80 qlnx_make_cdev(qlnx_host_t *ha)
81 {
82 	ha->ioctl_dev = make_dev(&qlnx_cdevsw,
83 				ha->ifp->if_dunit,
84 				UID_ROOT,
85 				GID_WHEEL,
86 				0600,
87 				"%s",
88 				if_name(ha->ifp));
89 
90 	if (ha->ioctl_dev == NULL)
91 		return (-1);
92 
93 	ha->ioctl_dev->si_drv1 = ha;
94 
95 	return (0);
96 }
97 
98 void
99 qlnx_del_cdev(qlnx_host_t *ha)
100 {
101 	if (ha->ioctl_dev != NULL)
102 		destroy_dev(ha->ioctl_dev);
103 	return;
104 }
105 
106 int
107 qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
108 {
109 	int rval = EINVAL;
110 	struct ecore_hwfn *p_hwfn;
111 	struct ecore_ptt *p_ptt;
112 
113 	if (ha->grcdump_dwords[hwfn_index]) {
114 		/* the grcdump is already available */
115 		*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
116 		return (0);
117 	}
118 
119 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
120 
121 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
122 	p_ptt = ecore_ptt_acquire(p_hwfn);
123 
124 	if (!p_ptt) {
125 		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
126 		return (rval);
127 	}
128 
129 	if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
130 			ha->grcdump[hwfn_index],
131 			(ha->grcdump_size[hwfn_index] >> 2),
132 			num_dumped_dwords)) == DBG_STATUS_OK) {
133 	 	rval = 0;
134 		ha->grcdump_taken = 1;
135 	} else
136 		QL_DPRINT1(ha,"ecore_dbg_grc_dump failed [%d, 0x%x]\n",
137 			   hwfn_index, rval);
138 
139 	ecore_ptt_release(p_hwfn, p_ptt);
140 
141 	return (rval);
142 }
143 
144 static void
145 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
146 {
147 	int i;
148 
149 	grcdump->pci_func = ha->pci_func;
150 
151 	for (i = 0; i < ha->cdev.num_hwfns; i++)
152 		grcdump->grcdump_size[i] = ha->grcdump_size[i];
153 
154 	return;
155 }
156 
157 static int
158 qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
159 {
160 	int		i;
161 	int		rval = 0;
162 	uint32_t	dwords = 0;
163 
164 	grcdump->pci_func = ha->pci_func;
165 
166 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
167 
168 		if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
169 			(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
170 			return (EINVAL);
171 
172 		rval = qlnx_grc_dump(ha, &dwords, i);
173 
174 		if (rval)
175 			break;
176 
177 		grcdump->grcdump_dwords[i] = dwords;
178 
179 		QL_DPRINT1(ha,"grcdump_dwords[%d] = 0x%x\n", i, dwords);
180 
181 		rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
182 				ha->grcdump_size[i]);
183 
184 		if (rval)
185 			break;
186 
187 		ha->grcdump_dwords[i] = 0;
188 	}
189 
190 	ha->grcdump_taken = 0;
191 
192 	return (rval);
193 }
194 
195 int
196 qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
197 {
198 	int rval = EINVAL;
199 	struct ecore_hwfn *p_hwfn;
200 	struct ecore_ptt *p_ptt;
201 
202 	if (ha->idle_chk_dwords[hwfn_index]) {
203 		/* the idle check is already available */
204 		*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
205 		return (0);
206 	}
207 
208 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
209 
210 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
211 	p_ptt = ecore_ptt_acquire(p_hwfn);
212 
213 	if (!p_ptt) {
214 		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
215 		return (rval);
216 	}
217 
218 	if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
219 			ha->idle_chk[hwfn_index],
220 			(ha->idle_chk_size[hwfn_index] >> 2),
221 			num_dumped_dwords)) == DBG_STATUS_OK) {
222 	 	rval = 0;
223 		ha->idle_chk_taken = 1;
224 	} else
225 		QL_DPRINT1(ha,"ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
226 			   hwfn_index, rval);
227 
228 	ecore_ptt_release(p_hwfn, p_ptt);
229 
230 	return (rval);
231 }
232 
233 static void
234 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
235 {
236 	int i;
237 
238 	idle_chk->pci_func = ha->pci_func;
239 
240 	for (i = 0; i < ha->cdev.num_hwfns; i++)
241 		idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
242 
243 	return;
244 }
245 
246 static int
247 qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
248 {
249 	int		i;
250 	int		rval = 0;
251 	uint32_t	dwords = 0;
252 
253 	idle_chk->pci_func = ha->pci_func;
254 
255 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
256 
257 		if ((ha->idle_chk[i] == NULL) ||
258 				(idle_chk->idle_chk[i] == NULL) ||
259 				(idle_chk->idle_chk_size[i] <
260 					ha->idle_chk_size[i]))
261 			return (EINVAL);
262 
263 		rval = qlnx_idle_chk(ha, &dwords, i);
264 
265 		if (rval)
266 			break;
267 
268 		idle_chk->idle_chk_dwords[i] = dwords;
269 
270 		QL_DPRINT1(ha,"idle_chk_dwords[%d] = 0x%x\n", i, dwords);
271 
272                	rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
273 				ha->idle_chk_size[i]);
274 
275 		if (rval)
276 			break;
277 
278 		ha->idle_chk_dwords[i] = 0;
279 	}
280 	ha->idle_chk_taken = 0;
281 
282 	return (rval);
283 }
284 
285 static uint32_t
286 qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
287 {
288         int rval = -1;
289         struct ecore_hwfn *p_hwfn;
290         struct ecore_ptt *p_ptt;
291 	uint32_t num_dwords = 0;
292 
293         p_hwfn = &ha->cdev.hwfns[hwfn_index];
294         p_ptt = ecore_ptt_acquire(p_hwfn);
295 
296         if (!p_ptt) {
297                 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
298                            hwfn_index, cmd);
299                 return (0);
300         }
301 
302 	switch (cmd) {
303 
304 	case QLNX_MCP_TRACE:
305         	rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
306 				p_ptt, &num_dwords);
307 		break;
308 
309 	case QLNX_REG_FIFO:
310         	rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
311 				p_ptt, &num_dwords);
312 		break;
313 
314 	case QLNX_IGU_FIFO:
315         	rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
316 				p_ptt, &num_dwords);
317 		break;
318 
319 	case QLNX_PROTECTION_OVERRIDE:
320         	rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
321 				p_ptt, &num_dwords);
322 		break;
323 
324 	case QLNX_FW_ASSERTS:
325         	rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
326 				p_ptt, &num_dwords);
327 		break;
328 	}
329 
330         if (rval != DBG_STATUS_OK) {
331                 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", cmd, rval);
332 		num_dwords = 0;
333         }
334 
335         ecore_ptt_release(p_hwfn, p_ptt);
336 
337         return ((num_dwords * sizeof (uint32_t)));
338 }
339 
340 static void
341 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
342 {
343 	int i;
344 
345 	trace->pci_func = ha->pci_func;
346 
347 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
348 		trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
349 	}
350 
351 	return;
352 }
353 
354 static int
355 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
356 {
357         int rval = -1;
358         struct ecore_hwfn *p_hwfn;
359         struct ecore_ptt *p_ptt;
360 	uint32_t num_dwords = 0;
361 	void *buffer;
362 
363 	buffer = qlnx_zalloc(trace->size[hwfn_index]);
364 	if (buffer == NULL) {
365                 QL_DPRINT1(ha,"qlnx_zalloc [%d, 0x%x]failed\n",
366                            hwfn_index, trace->cmd);
367                 return (ENXIO);
368 	}
369 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
370 
371         p_hwfn = &ha->cdev.hwfns[hwfn_index];
372         p_ptt = ecore_ptt_acquire(p_hwfn);
373 
374         if (!p_ptt) {
375                 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
376                            hwfn_index, trace->cmd);
377                 return (ENXIO);
378         }
379 
380 	switch (trace->cmd) {
381 
382 	case QLNX_MCP_TRACE:
383         	rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
384 				buffer, (trace->size[hwfn_index] >> 2),
385 				&num_dwords);
386 		break;
387 
388 	case QLNX_REG_FIFO:
389         	rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
390 				buffer, (trace->size[hwfn_index] >> 2),
391 				&num_dwords);
392 		break;
393 
394 	case QLNX_IGU_FIFO:
395         	rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
396 				buffer, (trace->size[hwfn_index] >> 2),
397 				&num_dwords);
398 		break;
399 
400 	case QLNX_PROTECTION_OVERRIDE:
401         	rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
402 				buffer, (trace->size[hwfn_index] >> 2),
403 				&num_dwords);
404 		break;
405 
406 	case QLNX_FW_ASSERTS:
407         	rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
408 				buffer, (trace->size[hwfn_index] >> 2),
409 				&num_dwords);
410 		break;
411 	}
412 
413         if (rval != DBG_STATUS_OK) {
414                 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
415 		num_dwords = 0;
416         }
417 
418         ecore_ptt_release(p_hwfn, p_ptt);
419 
420 	trace->dwords[hwfn_index] = num_dwords;
421 
422 	if (num_dwords) {
423                	rval = copyout(buffer, trace->buffer[hwfn_index],
424 				(num_dwords << 2));
425 	}
426 
427         return (rval);
428 }
429 
430 static int
431 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
432 {
433 	int			rval = 0;
434 	struct ecore_hwfn	*p_hwfn;
435 
436 	if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
437 		return (EINVAL);
438 	}
439 
440 	p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
441 
442 	switch (reg_rd_wr->cmd) {
443 
444 		case QLNX_REG_READ_CMD:
445 			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
446 				reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
447 							reg_rd_wr->addr);
448 			}
449 			break;
450 
451 		case QLNX_REG_WRITE_CMD:
452 			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
453 				qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
454 					reg_rd_wr->val);
455 			}
456 			break;
457 
458 		default:
459 			rval = EINVAL;
460 			break;
461 	}
462 
463 	return (rval);
464 }
465 
466 static int
467 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
468 {
469 	int rval = 0;
470 
471 	switch (pci_cfg_rd_wr->cmd) {
472 
473 		case QLNX_PCICFG_READ:
474 			pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
475 						pci_cfg_rd_wr->reg,
476 						pci_cfg_rd_wr->width);
477 			break;
478 
479 		case QLNX_PCICFG_WRITE:
480 			pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
481 				pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
482 			break;
483 
484 		default:
485 			rval = EINVAL;
486 			break;
487 	}
488 
489 	return (rval);
490 }
491 
492 static void
493 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
494 {
495 	bzero(mac_addr->addr, sizeof(mac_addr->addr));
496 	snprintf(mac_addr->addr, sizeof(mac_addr->addr),
497 		"%02x:%02x:%02x:%02x:%02x:%02x",
498 		ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
499 		ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
500 
501 	return;
502 }
503 
504 static int
505 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
506 {
507 	int		i;
508 	int		rval = 0;
509 	uint32_t	dwords = 0;
510 	uint8_t		*outb;
511 
512 	regs->reg_buf_len = 0;
513 	outb = regs->reg_buf;
514 
515 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
516 
517 		rval = qlnx_grc_dump(ha, &dwords, i);
518 
519 		if (rval)
520 			break;
521 
522 		regs->reg_buf_len += (dwords << 2);
523 
524 		rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
525 
526 		if (rval)
527 			break;
528 
529 		ha->grcdump_dwords[i] = 0;
530 		outb += regs->reg_buf_len;
531 	}
532 
533 	ha->grcdump_taken = 0;
534 
535 	return (rval);
536 }
537 
538 extern char qlnx_name_str[];
539 extern char qlnx_ver_str[];
540 
541 static int
542 qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
543 {
544 	int i;
545 
546 	bzero(drv_info, sizeof(qlnx_drvinfo_t));
547 
548 	snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
549 		qlnx_name_str);
550 	snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
551 		qlnx_ver_str);
552 	snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
553 		ha->mfw_ver);
554 	snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
555 		"%s", ha->stormfw_ver);
556 
557 	drv_info->eeprom_dump_len = ha->flash_size;
558 
559 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
560 		drv_info->reg_dump_len += ha->grcdump_size[i];
561 	}
562 
563 	snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
564 		"%d:%d:%d", pci_get_bus(ha->pci_dev),
565 		pci_get_slot(ha->pci_dev), ha->pci_func);
566 
567 	return (0);
568 }
569 
570 static int
571 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
572 {
573 	struct ecore_hwfn *p_hwfn;
574 	struct qlnx_link_output if_link;
575 
576 	p_hwfn = &ha->cdev.hwfns[0];
577 
578 	qlnx_fill_link(ha, p_hwfn, &if_link);
579 
580 	dev_info->supported = if_link.supported_caps;
581 	dev_info->advertising = if_link.advertised_caps;
582 	dev_info->speed = if_link.speed;
583 	dev_info->duplex = if_link.duplex;
584 	dev_info->port = ha->pci_func & 0x1;
585 	dev_info->autoneg = if_link.autoneg;
586 
587 	return (0);
588 }
589 
590 static int
591 qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
592 {
593 	uint8_t *buf;
594 	int ret = 0;
595 
596 	if ((nvram->data == NULL) || (nvram->data_len == 0))
597 		return (EINVAL);
598 
599 	buf = qlnx_zalloc(nvram->data_len);
600 
601 	ret = copyin(nvram->data, buf, nvram->data_len);
602 
603 	QL_DPRINT9(ha, "issue cmd = 0x%x data = %p \
604 		 data_len = 0x%x ret = 0x%x exit\n",
605 		cmd, nvram->data, nvram->data_len, ret);
606 
607 	if (ret == 0) {
608 		ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
609 			nvram->offset, buf, nvram->data_len);
610 	}
611 
612 	QL_DPRINT9(ha, "cmd = 0x%x data = %p \
613 		 data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
614 		cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
615 
616 	free(buf, M_QLNXBUF);
617 
618 	return (ret);
619 }
620 
621 static int
622 qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
623 {
624 	uint8_t *buf;
625 	int ret = 0;
626 
627 	if ((nvram->data == NULL) || (nvram->data_len == 0))
628 		return (EINVAL);
629 
630 	buf = qlnx_zalloc(nvram->data_len);
631 
632 	ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
633 		nvram->data_len);
634 
635 	QL_DPRINT9(ha, " data = %p data_len = 0x%x \
636 		 resp = 0x%x ret = 0x%x exit\n",
637 		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
638 
639 	if (ret == 0) {
640 		ret = copyout(buf, nvram->data, nvram->data_len);
641 	}
642 
643 	free(buf, M_QLNXBUF);
644 
645 	return (ret);
646 }
647 
648 static int
649 qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
650 {
651 	uint8_t *buf;
652 	int ret = 0;
653 
654 	if ((nvram->data == NULL) || (nvram->data_len == 0))
655 		return (EINVAL);
656 
657 	buf = qlnx_zalloc(nvram->data_len);
658 
659 
660 	ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
661 
662 	QL_DPRINT9(ha, "data = %p data_len = 0x%x \
663 		 resp = 0x%x ret = 0x%x exit\n",
664 		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
665 
666 	if (ret == 0) {
667 		ret = copyout(buf, nvram->data, nvram->data_len);
668 	}
669 
670 	free(buf, M_QLNXBUF);
671 
672 	return (ret);
673 }
674 
675 static int
676 qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
677 {
678 	int ret = 0;
679 
680 	switch (nvram->cmd) {
681 
682 	case QLNX_NVRAM_CMD_WRITE_NVRAM:
683 		ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
684 		break;
685 
686 	case QLNX_NVRAM_CMD_PUT_FILE_DATA:
687 		ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
688 		break;
689 
690 	case QLNX_NVRAM_CMD_READ_NVRAM:
691 		ret = qlnx_read_nvram(ha, nvram);
692 		break;
693 
694 	case QLNX_NVRAM_CMD_SET_SECURE_MODE:
695 		ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);
696 
697 		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE \
698 			 resp = 0x%x ret = 0x%x exit\n",
699 			 ha->cdev.mcp_nvm_resp, ret);
700 		break;
701 
702 	case QLNX_NVRAM_CMD_DEL_FILE:
703 		ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);
704 
705 		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE \
706 			 resp = 0x%x ret = 0x%x exit\n",
707 			ha->cdev.mcp_nvm_resp, ret);
708 		break;
709 
710 	case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
711 		ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);
712 
713 		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN \
714 			 resp = 0x%x ret = 0x%x exit\n",
715 			ha->cdev.mcp_nvm_resp, ret);
716 		break;
717 
718 	case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
719 		ret = qlnx_get_nvram_resp(ha, nvram);
720 		break;
721 
722 	default:
723 		ret = EINVAL;
724 		break;
725 	}
726 
727 	return (ret);
728 }
729 
730 static void
731 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
732 {
733 	int i;
734 	int index;
735 	int ret;
736 	int stats_copied = 0;
737 
738 	s_stats->num_hwfns = ha->cdev.num_hwfns;
739 
740 //	if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
741 //		return;
742 
743 	s_stats->num_samples = ha->storm_stats_index;
744 
745 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
746 
747 		index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
748 
749 		if (s_stats->buffer[i]) {
750 
751 			ret = copyout(&ha->storm_stats[index],
752 					s_stats->buffer[i],
753 					QLNX_STORM_STATS_BYTES_PER_HWFN);
754 			if (ret) {
755 				printf("%s [%d]: failed\n", __func__, i);
756 			}
757 
758 			if (s_stats->num_samples ==
759 				QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
760 
761 				bzero((void *)&ha->storm_stats[i],
762 					QLNX_STORM_STATS_BYTES_PER_HWFN);
763 
764 				stats_copied = 1;
765 			}
766 		}
767 	}
768 
769 	if (stats_copied)
770 		ha->storm_stats_index = 0;
771 
772 	return;
773 }
774 
775 #ifdef QLNX_USER_LLDP
776 
/*
 * Enable (enable != 0) or disable the MFW LLDP agent on the nearest-bridge
 * scope.  The chassis-id and port-id TLVs are rebuilt from the LLDP MAC
 * address reported by the management firmware.  Returns 0/ECORE_SUCCESS on
 * success, -1 or an ecore status code on failure.
 */
static int
qlnx_lldp_configure(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt, uint32_t enable)
{
	int ret = 0;
	uint8_t lldp_mac[6] = {0};
	struct ecore_lldp_config_params lldp_params;
	struct ecore_lldp_sys_tlvs tlv_params;

	ret = ecore_mcp_get_lldp_mac(p_hwfn, p_ptt, lldp_mac);

	if (ret != ECORE_SUCCESS) {
                device_printf(ha->pci_dev,
			"%s: ecore_mcp_get_lldp_mac failed\n", __func__);
                return (-1);
	}

	bzero(&lldp_params, sizeof(struct ecore_lldp_config_params));
	bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

	lldp_params.agent = ECORE_LLDP_NEAREST_BRIDGE;
	lldp_params.tx_interval = 30; //Default value used as suggested by MFW
	lldp_params.tx_hold = 4; //Default value used as suggested by MFW
	lldp_params.tx_credit = 5; //Default value used as suggested by MFW
	lldp_params.rx_enable = enable ? 1 : 0;
	lldp_params.tx_enable = enable ? 1 : 0;

	/*
	 * Hand-pack the chassis-id TLV into 32-bit words: word 0 carries
	 * the TLV type (<< 1), the value length (<< 8), the subtype
	 * (<< 16) and the first MAC byte (<< 24); words 1-2 hold the rest
	 * of the MAC.  NOTE(review): this presumably mirrors the IEEE
	 * 802.1AB TLV layout as consumed by the MFW -- confirm against the
	 * MFW interface definition before changing any of the shifts.
	 */
	lldp_params.chassis_id_tlv[0] = 0;
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_TYPE_CHASSIS_ID << 1);
	lldp_params.chassis_id_tlv[0] |=
		((QLNX_LLDP_CHASSIS_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_CHASSIS_ID_MAC_ADDR_LEN) << 8);
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_CHASSIS_ID_SUBTYPE_MAC << 16);
	lldp_params.chassis_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.chassis_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		 (lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.chassis_id_tlv[2] = lldp_mac[5];

	/* Port-id TLV: identical packing with the port-id type/subtype. */
	lldp_params.port_id_tlv[0] = 0;
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_TYPE_PORT_ID << 1);
	lldp_params.port_id_tlv[0] |=
		((QLNX_LLDP_PORT_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_PORT_ID_MAC_ADDR_LEN) << 8);
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_PORT_ID_SUBTYPE_MAC << 16);
	lldp_params.port_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.port_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		 (lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.port_id_tlv[2] = lldp_mac[5];

	ret = ecore_lldp_set_params(p_hwfn, p_ptt, &lldp_params);

	if (ret != ECORE_SUCCESS) {
                device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_params failed\n", __func__);
                return (-1);
	}

	//If LLDP is disable then disable discard_mandatory_tlv flag
	if (!enable) {
		tlv_params.discard_mandatory_tlv = false;
		tlv_params.buf_size = 0;
		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
	}

	if (ret != ECORE_SUCCESS) {
                device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_system_tlvs failed\n", __func__);
	}

	return (ret);
}
849 
850 static int
851 qlnx_register_default_lldp_tlvs(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
852 	struct ecore_ptt *p_ptt)
853 {
854 	int ret = 0;
855 
856 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
857 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_CHASSIS_ID);
858 	if (ret != ECORE_SUCCESS) {
859                 device_printf(ha->pci_dev,
860 			"%s: QLNX_LLDP_TYPE_CHASSIS_ID failed\n", __func__);
861 		goto qlnx_register_default_lldp_tlvs_exit;
862 	}
863 
864 	//register Port ID TLV
865 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
866 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_ID);
867 	if (ret != ECORE_SUCCESS) {
868                 device_printf(ha->pci_dev,
869 			"%s: QLNX_LLDP_TYPE_PORT_ID failed\n", __func__);
870 		goto qlnx_register_default_lldp_tlvs_exit;
871 	}
872 
873 	//register TTL TLV
874 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
875 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_TTL);
876 	if (ret != ECORE_SUCCESS) {
877                 device_printf(ha->pci_dev,
878 			"%s: QLNX_LLDP_TYPE_TTL failed\n", __func__);
879 		goto qlnx_register_default_lldp_tlvs_exit;
880 	}
881 
882 	//register Port Description TLV
883 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
884 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_DESC);
885 	if (ret != ECORE_SUCCESS) {
886                 device_printf(ha->pci_dev,
887 			"%s: QLNX_LLDP_TYPE_PORT_DESC failed\n", __func__);
888 		goto qlnx_register_default_lldp_tlvs_exit;
889 	}
890 
891 	//register System Name TLV
892 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
893 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_NAME);
894 	if (ret != ECORE_SUCCESS) {
895                 device_printf(ha->pci_dev,
896 			"%s: QLNX_LLDP_TYPE_SYS_NAME failed\n", __func__);
897 		goto qlnx_register_default_lldp_tlvs_exit;
898 	}
899 
900 	//register System Description TLV
901 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
902 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_DESC);
903 	if (ret != ECORE_SUCCESS) {
904                 device_printf(ha->pci_dev,
905 			"%s: QLNX_LLDP_TYPE_SYS_DESC failed\n", __func__);
906 		goto qlnx_register_default_lldp_tlvs_exit;
907 	}
908 
909 	//register System Capabilities TLV
910 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
911 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_CAPS);
912 	if (ret != ECORE_SUCCESS) {
913                 device_printf(ha->pci_dev,
914 			"%s: QLNX_LLDP_TYPE_SYS_CAPS failed\n", __func__);
915 		goto qlnx_register_default_lldp_tlvs_exit;
916 	}
917 
918 	//register Management Address TLV
919 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
920 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_MGMT_ADDR);
921 	if (ret != ECORE_SUCCESS) {
922                 device_printf(ha->pci_dev,
923 			"%s: QLNX_LLDP_TYPE_MGMT_ADDR failed\n", __func__);
924 		goto qlnx_register_default_lldp_tlvs_exit;
925 	}
926 
927 	//register Organizationally Specific TLVs
928 	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
929 			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_ORG_SPECIFIC);
930 	if (ret != ECORE_SUCCESS) {
931                 device_printf(ha->pci_dev,
932 			"%s: QLNX_LLDP_TYPE_ORG_SPECIFIC failed\n", __func__);
933 	}
934 
935 qlnx_register_default_lldp_tlvs_exit:
936 	return (ret);
937 }
938 
/*
 * Reconfigure the MFW LLDP agent and optionally install user-supplied
 * system TLVs.  The MFW-required sequence is: disable the agent, register
 * the default TLV set, re-enable the agent, then apply the caller's TLVs
 * (if any).  All work is done on hw function 0.  Returns 0 on success or
 * an errno/ecore status on failure.
 */
int
qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs)
{
	int ret = 0;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	struct ecore_lldp_sys_tlvs tlv_params;

	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		device_printf(ha->pci_dev,
			"%s: ecore_ptt_acquire failed\n", __func__);
		return (ENXIO);
	}

	/* Step 1: quiesce the agent before changing its TLV registration. */
	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 0);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure disable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	/* Step 2: (re-)register the default TLV set. */
	ret = qlnx_register_default_lldp_tlvs(ha, p_hwfn, p_ptt);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_register_default_lldp_tlvs failed\n",
			__func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	/* Step 3: bring the agent back up. */
	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 1);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure enable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	/*
	 * Step 4: hand the caller's TLV buffer to the MFW.  NULL means
	 * "reset to defaults only".  NOTE(review): lldp_tlvs->buf_size is
	 * used unchecked for the memcpy into tlv_params.buf -- presumably
	 * bounded by the caller/ioctl layer; confirm.
	 */
	if (lldp_tlvs != NULL) {
		bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

		tlv_params.discard_mandatory_tlv =
			(lldp_tlvs->discard_mandatory_tlv ? true: false);
		tlv_params.buf_size = lldp_tlvs->buf_size;
		memcpy(tlv_params.buf, lldp_tlvs->buf, lldp_tlvs->buf_size);

		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: ecore_lldp_set_system_tlvs failed\n",
				__func__);
		}
	}
qlnx_set_lldp_tlvx_exit:

	ecore_ptt_release(p_hwfn, p_ptt);
	return (ret);
}
1002 
1003 #endif /* #ifdef QLNX_USER_LLDP */
1004 
/*
 * Ioctl entry point for the qlnxioctl character device.  'data' is the
 * kernel copy of the user argument prepared by the ioctl layer for the
 * IOC-encoded size, so the handlers below read and write it directly
 * (copyin/copyout is only needed for the pointers embedded inside the
 * request structures).  Returns 0 or an errno.
 */
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qlnx_host_t	*ha;
	int		rval = 0;
	struct ifnet	*ifp;
	qlnx_trace_t	*trace;
	int		i;

	/* Softc stashed in si_drv1 by qlnx_make_cdev(). */
	if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	ifp = ha->ifp;

	switch (cmd) {

	case QLNX_GRC_DUMP_SIZE:
		qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_GRC_DUMP:
		rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_IDLE_CHK_SIZE:
		qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_IDLE_CHK:
		rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_DRV_INFO:
		rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
		break;

	case QLNX_DEV_SETTING:
		rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
		break;

	case QLNX_GET_REGS:
		rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
		break;

	case QLNX_NVRAM:
		rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
		break;

	case QLNX_RD_WR_REG:
		rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
		break;

	case QLNX_RD_WR_PCICFG:
		rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
		break;

	case QLNX_MAC_ADDR:
		qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
		break;

	case QLNX_STORM_STATS:
		qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
		break;

	case QLNX_TRACE_SIZE:
		qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
		break;

	case QLNX_TRACE:
		trace = (qlnx_trace_t *)data;

		/*
		 * Dump the requested trace for each hw function that has a
		 * buffer; stop at the first failure.  hwfns with no buffer
		 * or zero size are silently skipped.
		 */
		for (i = 0; i < ha->cdev.num_hwfns; i++) {

			if (trace->size[i] && trace->cmd && trace->buffer[i])
				rval = qlnx_get_trace(ha, i, trace);

			if (rval)
				break;
		}
		break;

#ifdef QLNX_USER_LLDP
	case QLNX_SET_LLDP_TLVS:
		rval = qlnx_set_lldp_tlvx(ha, (qlnx_lldp_sys_tlvs_t *)data);
		break;
#endif /* #ifdef QLNX_USER_LLDP */

	default:
		rval = EINVAL;
		break;
	}

	return (rval);
}
1100 
1101