xref: /freebsd/sys/dev/liquidio/lio_main.c (revision 1794a0a8)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Cavium, Inc. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "lio_bsd.h"
35 #include "lio_common.h"
36 
37 #include "lio_droq.h"
38 #include "lio_iq.h"
39 #include "lio_response_manager.h"
40 #include "lio_device.h"
41 #include "lio_ctrl.h"
42 #include "lio_main.h"
43 #include "lio_network.h"
44 #include "cn23xx_pf_device.h"
45 #include "lio_image.h"
46 #include "lio_ioctl.h"
47 #include "lio_rxtx.h"
48 #include "lio_rss.h"
49 
50 /* Number of milliseconds to wait for DDR initialization */
51 #define LIO_DDR_TIMEOUT	10000
52 #define LIO_MAX_FW_TYPE_LEN	8
53 
54 static char fw_type[LIO_MAX_FW_TYPE_LEN];
55 TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));
56 
57 /*
58  * Integers that specify number of queues per PF.
59  * Valid range is 0 to 64.
60  * Use 0 to derive from CPU count.
61  */
62 static int	num_queues_per_pf0;
63 static int	num_queues_per_pf1;
64 TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
65 TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);
66 
67 #ifdef RSS
68 static int	lio_rss = 1;
69 TUNABLE_INT("hw.lio.rss", &lio_rss);
70 #endif	/* RSS */
71 
72 /* Hardware LRO */
73 unsigned int	lio_hwlro = 0;
74 TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);
75 
76 /*
77  * Bitmask indicating which consoles have debug
78  * output redirected to syslog.
79  */
80 static unsigned long	console_bitmask;
81 TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);
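
/*
 * Illustrative loader.conf(5) settings for the tunables above (the
 * values shown are examples only, not recommendations):
 *
 *	hw.lio.fw_type="nic"
 *	hw.lio.num_queues_per_pf0="8"
 *	hw.lio.num_queues_per_pf1="8"
 *	hw.lio.hwlro="0"
 *	hw.lio.console_bitmask="1"
 *
 * A queue count of 0 lets the driver derive the value from the CPU
 * count, and console_bitmask=1 redirects debug output of console 0 to
 * syslog (see lio_console_debug_enabled() below).
 */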
82 
83 /*
84  * \brief determines if a given console has debug enabled.
85  * @param console console to check
86  * @returns  1 = enabled. 0 otherwise
87  */
88 int
89 lio_console_debug_enabled(uint32_t console)
90 {
91 
92 	return (console_bitmask >> (console)) & 0x1;
93 }
94 
95 static int	lio_detach(device_t dev);
96 
97 static int	lio_device_init(struct octeon_device *octeon_dev);
98 static int	lio_chip_specific_setup(struct octeon_device *oct);
99 static void	lio_watchdog(void *param);
100 static int	lio_load_firmware(struct octeon_device *oct);
101 static int	lio_nic_starter(struct octeon_device *oct);
102 static int	lio_init_nic_module(struct octeon_device *oct);
103 static int	lio_setup_nic_devices(struct octeon_device *octeon_dev);
104 static int	lio_link_info(struct lio_recv_info *recv_info, void *ptr);
105 static void	lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
106 				    void *buf);
107 static int	lio_set_rxcsum_command(if_t ifp, int command,
108 				       uint8_t rx_cmd);
109 static int	lio_setup_glists(struct octeon_device *oct, struct lio *lio,
110 				 int num_iqs);
111 static void	lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
112 static inline void	lio_update_link_status(if_t ifp,
113 					       union octeon_link_status *ls);
114 static void	lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
115 static int	lio_stop_nic_module(struct octeon_device *oct);
116 static void	lio_destroy_resources(struct octeon_device *oct);
117 static int	lio_setup_rx_oom_poll_fn(if_t ifp);
118 
119 static void	lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid);
120 static void	lio_vlan_rx_kill_vid(void *arg, if_t ifp,
121 				     uint16_t vid);
122 static struct octeon_device *
123 	lio_get_other_octeon_device(struct octeon_device *oct);
124 
125 static int	lio_wait_for_oq_pkts(struct octeon_device *oct);
126 
127 int	lio_send_rss_param(struct lio *lio);
128 static int	lio_dbg_console_print(struct octeon_device *oct,
129 				      uint32_t console_num, char *prefix,
130 				      char *suffix);
131 
132 /* Polling interval for determining when NIC application is alive */
133 #define LIO_STARTER_POLL_INTERVAL_MS	100
134 
135 /*
136  * vendor_info_array.
137  * This array contains the list of IDs on which the driver should load.
138  */
139 struct lio_vendor_info {
140 	uint16_t	vendor_id;
141 	uint16_t	device_id;
142 	uint16_t	subdevice_id;
143 	uint8_t		revision_id;
144 	uint8_t		index;
145 };
146 
147 static struct lio_vendor_info lio_pci_tbl[] = {
148 	/* CN2350 10G */
149 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
150 		0x02, 0},
151 
152 	/* CN2350 10G */
153 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
154 		0x02, 0},
155 
156 	/* CN2360 10G */
157 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
158 		0x02, 1},
159 
160 	/* CN2350 25G */
161 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
162 		0x02, 2},
163 
164 	/* CN2360 25G */
165 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
166 		0x02, 3},
167 
168 	{0, 0, 0, 0, 0}
169 };
170 
171 static char *lio_strings[] = {
172 	"LiquidIO 2350 10GbE Server Adapter",
173 	"LiquidIO 2360 10GbE Server Adapter",
174 	"LiquidIO 2350 25GbE Server Adapter",
175 	"LiquidIO 2360 25GbE Server Adapter",
176 };
177 
178 struct lio_if_cfg_resp {
179 	uint64_t	rh;
180 	struct octeon_if_cfg_info cfg_info;
181 	uint64_t	status;
182 };
183 
184 struct lio_if_cfg_context {
185 	int		octeon_id;
186 	volatile int	cond;
187 };
188 
189 struct lio_rx_ctl_context {
190 	int		octeon_id;
191 	volatile int	cond;
192 };
193 
194 static int
195 lio_probe(device_t dev)
196 {
197 	struct lio_vendor_info	*tbl;
198 
199 	uint16_t	vendor_id;
200 	uint16_t	device_id;
201 	uint16_t	subdevice_id;
202 	uint8_t		revision_id;
203 
204 	vendor_id = pci_get_vendor(dev);
205 	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
206 		return (ENXIO);
207 
208 	device_id = pci_get_device(dev);
209 	subdevice_id = pci_get_subdevice(dev);
210 	revision_id = pci_get_revid(dev);
211 
212 	tbl = lio_pci_tbl;
213 	while (tbl->vendor_id) {
214 		if ((vendor_id == tbl->vendor_id) &&
215 		    (device_id == tbl->device_id) &&
216 		    (subdevice_id == tbl->subdevice_id) &&
217 		    (revision_id == tbl->revision_id)) {
218 			device_set_descf(dev, "%s, Version - %s",
219 			    lio_strings[tbl->index], LIO_VERSION);
220 			return (BUS_PROBE_DEFAULT);
221 		}
222 
223 		tbl++;
224 	}
225 
226 	return (ENXIO);
227 }
228 
229 static int
230 lio_attach(device_t device)
231 {
232 	struct octeon_device	*oct_dev = NULL;
233 	uint64_t	scratch1;
234 	uint32_t	error;
235 	int		timeout, ret = 1;
236 	uint8_t		bus, dev, function;
237 
238 	oct_dev = lio_allocate_device(device);
239 	if (oct_dev == NULL) {
240 		device_printf(device, "Error: Unable to allocate device\n");
241 		return (-ENOMEM);
242 	}
243 
244 	oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
245 	oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
246 	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
247 
248 	oct_dev->device = device;
249 	bus = pci_get_bus(device);
250 	dev = pci_get_slot(device);
251 	function = pci_get_function(device);
252 
253 	lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
254 		     pci_get_vendor(device), pci_get_device(device), bus, dev,
255 		     function);
256 
257 	if (lio_device_init(oct_dev)) {
258 		lio_dev_err(oct_dev, "Failed to init device\n");
259 		lio_detach(device);
260 		return (-ENOMEM);
261 	}
262 
263 	scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
264 	if (!(scratch1 & 4ULL)) {
265 		/*
266 		 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
267 		 * the lio watchdog kernel thread is running for this
268 		 * NIC.  Each NIC gets one watchdog kernel thread.
269 		 */
270 		scratch1 |= 4ULL;
271 		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
272 
273 		error = kproc_create(lio_watchdog, oct_dev,
274 				     &oct_dev->watchdog_task, 0, 0,
275 				     "liowd/%02hhx:%02hhx.%hhx", bus,
276 				     dev, function);
277 		if (!error) {
278 			kproc_resume(oct_dev->watchdog_task);
279 		} else {
280 			oct_dev->watchdog_task = NULL;
281 			lio_dev_err(oct_dev,
282 				    "failed to create kernel_thread\n");
283 			lio_detach(device);
284 			return (-1);
285 		}
286 	}
287 	oct_dev->rx_pause = 1;
288 	oct_dev->tx_pause = 1;
289 
290 	timeout = 0;
291 	while (timeout < LIO_NIC_STARTER_TIMEOUT) {
292 		lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
293 		timeout += LIO_STARTER_POLL_INTERVAL_MS;
294 
295 		/*
296 		 * During the boot process interrupts are not available.
297 		 * So poll for the first control message from the firmware.
298 		 */
299 		if (cold)
300 			lio_droq_bh(oct_dev->droq[0], 0);
301 
302 		if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
303 			ret = lio_nic_starter(oct_dev);
304 			break;
305 		}
306 	}
307 
308 	if (ret) {
309 		lio_dev_err(oct_dev, "Firmware failed to start\n");
310 		lio_detach(device);
311 		return (-EIO);
312 	}
313 
314 	lio_dev_dbg(oct_dev, "Device is ready\n");
315 
316 	return (0);
317 }
318 
319 static int
320 lio_detach(device_t dev)
321 {
322 	struct octeon_device	*oct_dev = device_get_softc(dev);
323 
324 	lio_dev_dbg(oct_dev, "Stopping device\n");
325 	if (oct_dev->watchdog_task) {
326 		uint64_t	scratch1;
327 
328 		kproc_suspend(oct_dev->watchdog_task, 0);
329 
330 		scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
331 		scratch1 &= ~4ULL;
332 		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
333 	}
334 
335 	if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
336 		lio_stop_nic_module(oct_dev);
337 
338 	/*
339 	 * Reset the octeon device and clean up all memory allocated for
340 	 * the octeon device by the driver.
341 	 */
342 	lio_destroy_resources(oct_dev);
343 
344 	lio_dev_info(oct_dev, "Device removed\n");
345 
346 	/*
347 	 * This octeon device has been removed. Update the global
348 	 * data structure to reflect this. Free the device structure.
349 	 */
350 	lio_free_device_mem(oct_dev);
351 	return (0);
352 }
353 
354 static int
355 lio_shutdown(device_t dev)
356 {
357 	struct octeon_device	*oct_dev = device_get_softc(dev);
358 	struct lio	*lio = if_getsoftc(oct_dev->props.ifp);
359 
360 	lio_send_rx_ctrl_cmd(lio, 0);
361 
362 	return (0);
363 }
364 
365 static int
366 lio_suspend(device_t dev)
367 {
368 
369 	return (ENXIO);
370 }
371 
372 static int
373 lio_resume(device_t dev)
374 {
375 
376 	return (ENXIO);
377 }
378 
379 static int
380 lio_event(struct module *mod, int event, void *junk)
381 {
382 
383 	switch (event) {
384 	case MOD_LOAD:
385 		lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
386 		break;
387 	default:
388 		break;
389 	}
390 
391 	return (0);
392 }
393 
394 /*********************************************************************
395  *  FreeBSD Device Interface Entry Points
396  * *******************************************************************/
397 static device_method_t lio_methods[] = {
398 	/* Device interface */
399 	DEVMETHOD(device_probe, lio_probe),
400 	DEVMETHOD(device_attach, lio_attach),
401 	DEVMETHOD(device_detach, lio_detach),
402 	DEVMETHOD(device_shutdown, lio_shutdown),
403 	DEVMETHOD(device_suspend, lio_suspend),
404 	DEVMETHOD(device_resume, lio_resume),
405 	DEVMETHOD_END
406 };
407 
408 static driver_t lio_driver = {
409 	LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
410 };
411 
412 DRIVER_MODULE(lio, pci, lio_driver, lio_event, NULL);
413 
414 MODULE_DEPEND(lio, pci, 1, 1, 1);
415 MODULE_DEPEND(lio, ether, 1, 1, 1);
416 MODULE_DEPEND(lio, firmware, 1, 1, 1);
417 
418 static bool
419 fw_type_is_none(void)
420 {
421 	return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
422 		       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
423 }
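
/*
 * Note (sketch of intended behavior): when hw.lio.fw_type matches
 * LIO_FW_NAME_TYPE_NONE (conventionally the string "none"), the
 * initialization path below skips the soft reset, DDR/console setup and
 * host firmware download, and assumes a suitable firmware image is
 * already running on the adapter (e.g. booted from flash).
 */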
424 
425 /*
426  * \brief Device initialization for each Octeon device that is probed
427  * @param octeon_dev  octeon device
428  */
429 static int
430 lio_device_init(struct octeon_device *octeon_dev)
431 {
432 	unsigned long	ddr_timeout = LIO_DDR_TIMEOUT;
433 	char	*dbg_enb = NULL;
434 	int	fw_loaded = 0;
435 	int	i, j, ret;
436 	uint8_t	bus, dev, function;
437 	char	bootcmd[] = "\n";
438 
439 	bus = pci_get_bus(octeon_dev->device);
440 	dev = pci_get_slot(octeon_dev->device);
441 	function = pci_get_function(octeon_dev->device);
442 
443 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);
444 
445 	/* Enable access to the octeon device */
446 	if (pci_enable_busmaster(octeon_dev->device)) {
447 		lio_dev_err(octeon_dev, "pci_enable_device failed\n");
448 		return (1);
449 	}
450 
451 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);
452 
453 	/* Identify the Octeon type and map the BAR address space. */
454 	if (lio_chip_specific_setup(octeon_dev)) {
455 		lio_dev_err(octeon_dev, "Chip specific setup failed\n");
456 		return (1);
457 	}
458 
459 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);
460 
461 	/*
462 	 * Only add a reference after setting status 'LIO_DEV_PCI_MAP_DONE',
463 	 * since that is what is required for the reference to be removed
464 	 * during de-initialization (see 'lio_destroy_resources').
465 	 */
466 	lio_register_device(octeon_dev, bus, dev, function, true);
467 
468 
469 	octeon_dev->app_mode = LIO_DRV_INVALID_APP;
470 
471 	if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
472 		fw_loaded = 0;
473 		/* Do a soft reset of the Octeon device. */
474 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
475 			return (1);
476 
477 		/* things might have changed */
478 		if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
479 			fw_loaded = 0;
480 		else
481 			fw_loaded = 1;
482 	} else {
483 		fw_loaded = 1;
484 	}
485 
486 	/*
487 	 * Initialize the dispatch mechanism used to push packets arriving on
488 	 * Octeon Output queues.
489 	 */
490 	if (lio_init_dispatch_list(octeon_dev))
491 		return (1);
492 
493 	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
494 				 LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
495 				 lio_core_drv_init, octeon_dev);
496 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);
497 
498 	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
499 	if (ret) {
500 		lio_dev_err(octeon_dev,
501 			    "Failed to configure device registers\n");
502 		return (ret);
503 	}
504 
505 	/* Initialize soft command buffer pool */
506 	if (lio_setup_sc_buffer_pool(octeon_dev)) {
507 		lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
508 		return (1);
509 	}
510 
511 	atomic_store_rel_int(&octeon_dev->status,
512 			     LIO_DEV_SC_BUFF_POOL_INIT_DONE);
513 
514 	if (lio_allocate_ioq_vector(octeon_dev)) {
515 		lio_dev_err(octeon_dev,
516 			    "IOQ vector allocation failed\n");
517 		return (1);
518 	}
519 
520 	atomic_store_rel_int(&octeon_dev->status,
521 			     LIO_DEV_MSIX_ALLOC_VECTOR_DONE);
522 
523 	for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
524 		octeon_dev->instr_queue[i] =
525 			malloc(sizeof(struct lio_instr_queue),
526 			       M_DEVBUF, M_NOWAIT | M_ZERO);
527 		if (octeon_dev->instr_queue[i] == NULL)
528 			return (1);
529 	}
530 
531 	/* Setup the data structures that manage this Octeon's Input queues. */
532 	if (lio_setup_instr_queue0(octeon_dev)) {
533 		lio_dev_err(octeon_dev,
534 			    "Instruction queue initialization failed\n");
535 		return (1);
536 	}
537 
538 	atomic_store_rel_int(&octeon_dev->status,
539 			     LIO_DEV_INSTR_QUEUE_INIT_DONE);
540 
541 	/*
542 	 * Initialize lists to manage the requests of different types that
543 	 * arrive from user & kernel applications for this octeon device.
544 	 */
545 
546 	if (lio_setup_response_list(octeon_dev)) {
547 		lio_dev_err(octeon_dev, "Response list allocation failed\n");
548 		return (1);
549 	}
550 
551 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);
552 
553 	for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
554 		octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
555 					     M_DEVBUF, M_NOWAIT | M_ZERO);
556 		if (octeon_dev->droq[i] == NULL)
557 			return (1);
558 	}
559 
560 	if (lio_setup_output_queue0(octeon_dev)) {
561 		lio_dev_err(octeon_dev, "Output queue initialization failed\n");
562 		return (1);
563 	}
564 
565 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);
566 
567 	/*
568 	 * Setup the interrupt handler and record the INT SUM register address
569 	 */
570 	if (lio_setup_interrupt(octeon_dev,
571 				octeon_dev->sriov_info.num_pf_rings))
572 		return (1);
573 
574 	/* Enable Octeon device interrupts */
575 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
576 
577 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);
578 
579 	/*
580 	 * Send Credit for Octeon Output queues. Credits are always sent BEFORE
581 	 * the output queue is enabled.
582 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
583 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
584 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
585 	 * before any credits have been issued, causing the ring to be reset
586 	 * (and the f/w appear to never have started).
587 	 */
588 	for (j = 0; j < octeon_dev->num_oqs; j++)
589 		lio_write_csr32(octeon_dev,
590 				octeon_dev->droq[j]->pkts_credit_reg,
591 				octeon_dev->droq[j]->max_count);
592 
593 	/* Enable the input and output queues for this Octeon device */
594 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
595 	if (ret) {
596 		lio_dev_err(octeon_dev, "Failed to enable input/output queues");
597 		return (ret);
598 	}
599 
600 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);
601 
602 	if (!fw_loaded) {
603 		lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
604 		if (!ddr_timeout) {
605 			lio_dev_info(octeon_dev,
606 				     "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
607 		}
608 
609 		lio_sleep_timeout(LIO_RESET_MSECS);
610 
611 		/*
612 		 * Wait for the octeon to initialize DDR after the
613 		 * soft-reset.
614 		 */
615 		while (!ddr_timeout) {
616 			if (pause("-", lio_ms_to_ticks(100))) {
617 				/* user probably pressed Control-C */
618 				return (1);
619 			}
620 		}
621 
622 		ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
623 		if (ret) {
624 			lio_dev_err(octeon_dev,
625 				    "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
626 				    ret);
627 			return (1);
628 		}
629 
630 		if (lio_wait_for_bootloader(octeon_dev, 1100)) {
631 			lio_dev_err(octeon_dev, "Board not responding\n");
632 			return (1);
633 		}
634 
635 		/* Divert uboot to take commands from host instead. */
636 		ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);
637 
638 		lio_dev_dbg(octeon_dev, "Initializing consoles\n");
639 		ret = lio_init_consoles(octeon_dev);
640 		if (ret) {
641 			lio_dev_err(octeon_dev, "Could not access board consoles\n");
642 			return (1);
643 		}
644 
645 		/*
646 		 * If console debugging is enabled, pass an empty string to
647 		 * use the default enablement; otherwise pass NULL so it
648 		 * stays disabled.
649 		 */
650 		dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
651 		ret = lio_add_console(octeon_dev, 0, dbg_enb);
652 
653 		if (ret) {
654 			lio_dev_err(octeon_dev, "Could not access board console\n");
655 			return (1);
656 		} else if (lio_console_debug_enabled(0)) {
657 			/*
658 			 * If console was added AND we're logging console output
659 			 * then set our console print function.
660 			 */
661 			octeon_dev->console[0].print = lio_dbg_console_print;
662 		}
663 
664 		atomic_store_rel_int(&octeon_dev->status,
665 				     LIO_DEV_CONSOLE_INIT_DONE);
666 
667 		lio_dev_dbg(octeon_dev, "Loading firmware\n");
668 
669 		ret = lio_load_firmware(octeon_dev);
670 		if (ret) {
671 			lio_dev_err(octeon_dev, "Could not load firmware to board\n");
672 			return (1);
673 		}
674 	}
675 
676 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);
677 
678 	return (0);
679 }
680 
681 /*
682  * \brief PCI FLR for each Octeon device.
683  * @param oct octeon device
684  */
685 static void
686 lio_pci_flr(struct octeon_device *oct)
687 {
688 	uint32_t	exppos, status;
689 
690 	pci_find_cap(oct->device, PCIY_EXPRESS, &exppos);
691 
692 	pci_save_state(oct->device);
693 
694 	/* Quiesce the device completely */
695 	pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);
696 
697 	/* Wait for the Transaction Pending bit to clear */
698 	lio_mdelay(100);
699 
700 	status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
701 	if (status & PCIEM_STA_TRANSACTION_PND) {
702 		lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
703 		lio_mdelay(5);
704 
705 		status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
706 		if (status & PCIEM_STA_TRANSACTION_PND)
707 			lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
708 	}
709 
710 	pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
711 	lio_mdelay(100);
712 
713 	pci_restore_state(oct->device);
714 }
715 
716 /*
717  * \brief Debug console print function
718  * @param octeon_dev  octeon device
719  * @param console_num console number
720  * @param prefix      first portion of line to display
721  * @param suffix      second portion of line to display
722  *
723  * The OCTEON debug console outputs entire lines (excluding '\n').
724  * Normally, the line will be passed in the 'prefix' parameter.
725  * However, due to buffering, it is possible for a line to be split into two
726  * parts, in which case they will be passed as the 'prefix' parameter and
727  * 'suffix' parameter.
728  */
729 static int
730 lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
731 		      char *prefix, char *suffix)
732 {
733 
734 	if (prefix != NULL && suffix != NULL)
735 		lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
736 	else if (prefix != NULL)
737 		lio_dev_info(oct, "%u: %s\n", console_num, prefix);
738 	else if (suffix != NULL)
739 		lio_dev_info(oct, "%u: %s\n", console_num, suffix);
740 
741 	return (0);
742 }
743 
744 static void
745 lio_watchdog(void *param)
746 {
747 	int		core_num;
748 	uint16_t	mask_of_crashed_or_stuck_cores = 0;
749 	struct octeon_device	*oct = param;
750 	bool		err_msg_was_printed[12];
751 
752 	bzero(err_msg_was_printed, sizeof(err_msg_was_printed));
753 
754 	while (1) {
755 		kproc_suspend_check(oct->watchdog_task);
756 		mask_of_crashed_or_stuck_cores =
757 			(uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
758 
759 		if (mask_of_crashed_or_stuck_cores) {
760 			struct octeon_device *other_oct;
761 
762 			oct->cores_crashed = true;
763 			other_oct = lio_get_other_octeon_device(oct);
764 			if (other_oct != NULL)
765 				other_oct->cores_crashed = true;
766 
767 			for (core_num = 0; core_num < LIO_MAX_CORES;
768 			     core_num++) {
769 				bool core_crashed_or_got_stuck;
770 
771 				core_crashed_or_got_stuck =
772 				    (mask_of_crashed_or_stuck_cores >>
773 				     core_num) & 1;
774 				if (core_crashed_or_got_stuck &&
775 				    !err_msg_was_printed[core_num]) {
776 					lio_dev_err(oct,
777 						    "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
778 						    core_num);
779 					err_msg_was_printed[core_num] = true;
780 				}
781 			}
782 
783 		}
784 
785 		/* sleep for two seconds */
786 		pause("-", lio_ms_to_ticks(2000));
787 	}
788 }
789 
790 static int
791 lio_chip_specific_setup(struct octeon_device *oct)
792 {
793 	char		*s;
794 	uint32_t	dev_id;
795 	int		ret = 1;
796 
797 	dev_id = lio_read_pci_cfg(oct, 0);
798 	oct->subdevice_id = pci_get_subdevice(oct->device);
799 
800 	switch (dev_id) {
801 	case LIO_CN23XX_PF_PCIID:
802 		oct->chip_id = LIO_CN23XX_PF_VID;
803 		if (pci_get_function(oct->device) == 0) {
804 			if (num_queues_per_pf0 < 0) {
805 				lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
806 					     num_queues_per_pf0);
807 				num_queues_per_pf0 = 0;
808 			}
809 
810 			oct->sriov_info.num_pf_rings = num_queues_per_pf0;
811 		} else {
812 			if (num_queues_per_pf1 < 0) {
813 				lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
814 					     num_queues_per_pf1);
815 				num_queues_per_pf1 = 0;
816 			}
817 
818 			oct->sriov_info.num_pf_rings = num_queues_per_pf1;
819 		}
820 
821 		ret = lio_cn23xx_pf_setup_device(oct);
822 		s = "CN23XX";
823 		break;
824 
825 	default:
826 		s = "?";
827 		lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
828 	}
829 
830 	if (!ret)
831 		lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
832 			     OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
833 			     lio_get_conf(oct)->card_name, LIO_VERSION);
834 
835 	return (ret);
836 }
837 
838 static struct octeon_device *
839 lio_get_other_octeon_device(struct octeon_device *oct)
840 {
841 	struct octeon_device	*other_oct;
842 
843 	other_oct = lio_get_device(oct->octeon_id + 1);
844 
845 	if ((other_oct != NULL) && other_oct->device) {
846 		int	oct_busnum, other_oct_busnum;
847 
848 		oct_busnum = pci_get_bus(oct->device);
849 		other_oct_busnum = pci_get_bus(other_oct->device);
850 
851 		if (oct_busnum == other_oct_busnum) {
852 			int	oct_slot, other_oct_slot;
853 
854 			oct_slot = pci_get_slot(oct->device);
855 			other_oct_slot = pci_get_slot(other_oct->device);
856 
857 			if (oct_slot == other_oct_slot)
858 				return (other_oct);
859 		}
860 	}
861 	return (NULL);
862 }
863 
864 /*
865  * \brief Load firmware to device
866  * @param oct octeon device
867  *
868  * Maps device to firmware filename, requests firmware, and downloads it
869  */
870 static int
871 lio_load_firmware(struct octeon_device *oct)
872 {
873 	const struct firmware	*fw;
874 	char	*tmp_fw_type = NULL;
875 	int	ret = 0;
876 	char	fw_name[LIO_MAX_FW_FILENAME_LEN];
877 
878 	if (fw_type[0] == '\0')
879 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
880 	else
881 		tmp_fw_type = fw_type;
882 
883 	sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
884 		lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);
885 
886 	fw = firmware_get(fw_name);
887 	if (fw == NULL) {
888 		lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
889 			    fw_name);
890 		return (EINVAL);
891 	}
892 
893 	ret = lio_download_firmware(oct, fw->data, fw->datasize);
894 
895 	firmware_put(fw, FIRMWARE_UNLOAD);
896 
897 	return (ret);
898 }
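
/*
 * For example, with the defaults above the requested image name expands
 * to something like "lio_23xx_nic.bin" (assuming LIO_FW_BASE_NAME is
 * "lio_", the configured card_name is "23xx" and LIO_FW_NAME_SUFFIX is
 * ".bin"); firmware_get(9) then resolves that name against the
 * registered firmware(9) images, so a matching firmware module must be
 * loadable at attach time.
 */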
899 
900 static int
901 lio_nic_starter(struct octeon_device *oct)
902 {
903 	int	ret = 0;
904 
905 	atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);
906 
907 	if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
908 		if (lio_init_nic_module(oct)) {
909 			lio_dev_err(oct, "NIC initialization failed\n");
910 			ret = -1;
911 #ifdef CAVIUM_ONLY_23XX_VF
912 		} else {
913 			if (octeon_enable_sriov(oct) < 0)
914 				ret = -1;
915 #endif
916 		}
917 	} else {
918 		lio_dev_err(oct,
919 			    "Unexpected application running on NIC (%d). Check firmware.\n",
920 			    oct->app_mode);
921 		ret = -1;
922 	}
923 
924 	return (ret);
925 }
926 
927 static int
928 lio_init_nic_module(struct octeon_device *oct)
929 {
930 	int	num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
931 	int	retval = 0;
932 
933 	lio_dev_dbg(oct, "Initializing network interfaces\n");
934 
935 	/*
936 	 * Only the default IQ and OQ were initialized;
937 	 * initialize the rest as well.
938 	 */
939 
940 	/* run port_config command for each port */
941 	oct->ifcount = num_nic_ports;
942 
943 	bzero(&oct->props, sizeof(struct lio_if_props));
944 
945 	oct->props.gmxport = -1;
946 
947 	retval = lio_setup_nic_devices(oct);
948 	if (retval) {
949 		lio_dev_err(oct, "Setup NIC devices failed\n");
950 		goto lio_init_failure;
951 	}
952 
953 	lio_dev_dbg(oct, "Network interfaces ready\n");
954 
955 	return (retval);
956 
957 lio_init_failure:
958 
959 	oct->ifcount = 0;
960 
961 	return (retval);
962 }
963 
964 static int
965 lio_ifmedia_update(if_t ifp)
966 {
967 	struct lio	*lio = if_getsoftc(ifp);
968 	struct ifmedia	*ifm;
969 
970 	ifm = &lio->ifmedia;
971 
972 	/* We only support Ethernet media type. */
973 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
974 		return (EINVAL);
975 
976 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
977 	case IFM_AUTO:
978 		break;
979 	case IFM_10G_CX4:
980 	case IFM_10G_SR:
981 	case IFM_10G_T:
982 	case IFM_10G_TWINAX:
983 	default:
984 		/* We don't support changing the media type. */
985 		lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
986 			    IFM_SUBTYPE(ifm->ifm_media));
987 		return (EINVAL);
988 	}
989 
990 	return (0);
991 }
992 
993 static int
994 lio_get_media_subtype(struct octeon_device *oct)
995 {
996 
997 	switch(oct->subdevice_id) {
998 	case LIO_CN2350_10G_SUBDEVICE:
999 	case LIO_CN2350_10G_SUBDEVICE1:
1000 	case LIO_CN2360_10G_SUBDEVICE:
1001 		return (IFM_10G_SR);
1002 
1003 	case LIO_CN2350_25G_SUBDEVICE:
1004 	case LIO_CN2360_25G_SUBDEVICE:
1005 		return (IFM_25G_SR);
1006 	}
1007 
1008 	return (IFM_10G_SR);
1009 }
1010 
1011 static uint64_t
1012 lio_get_baudrate(struct octeon_device *oct)
1013 {
1014 
1015 	switch(oct->subdevice_id) {
1016 	case LIO_CN2350_10G_SUBDEVICE:
1017 	case LIO_CN2350_10G_SUBDEVICE1:
1018 	case LIO_CN2360_10G_SUBDEVICE:
1019 		return (IF_Gbps(10));
1020 
1021 	case LIO_CN2350_25G_SUBDEVICE:
1022 	case LIO_CN2360_25G_SUBDEVICE:
1023 		return (IF_Gbps(25));
1024 	}
1025 
1026 	return (IF_Gbps(10));
1027 }
1028 
1029 static void
1030 lio_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
1031 {
1032 	struct lio	*lio = if_getsoftc(ifp);
1033 
1034 	/* Report link down if the driver isn't running. */
1035 	if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1036 		ifmr->ifm_active |= IFM_NONE;
1037 		return;
1038 	}
1039 
1040 	/* Setup the default interface info. */
1041 	ifmr->ifm_status = IFM_AVALID;
1042 	ifmr->ifm_active = IFM_ETHER;
1043 
1044 	if (lio->linfo.link.s.link_up) {
1045 		ifmr->ifm_status |= IFM_ACTIVE;
1046 	} else {
1047 		ifmr->ifm_active |= IFM_NONE;
1048 		return;
1049 	}
1050 
1051 	ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);
1052 
1053 	if (lio->linfo.link.s.duplex)
1054 		ifmr->ifm_active |= IFM_FDX;
1055 	else
1056 		ifmr->ifm_active |= IFM_HDX;
1057 }
1058 
1059 static uint64_t
1060 lio_get_counter(if_t ifp, ift_counter cnt)
1061 {
1062 	struct lio	*lio = if_getsoftc(ifp);
1063 	struct octeon_device	*oct = lio->oct_dev;
1064 	uint64_t	counter = 0;
1065 	int		i, q_no;
1066 
1067 	switch (cnt) {
1068 	case IFCOUNTER_IPACKETS:
1069 		for (i = 0; i < oct->num_oqs; i++) {
1070 			q_no = lio->linfo.rxpciq[i].s.q_no;
1071 			counter += oct->droq[q_no]->stats.rx_pkts_received;
1072 		}
1073 		break;
1074 	case IFCOUNTER_OPACKETS:
1075 		for (i = 0; i < oct->num_iqs; i++) {
1076 			q_no = lio->linfo.txpciq[i].s.q_no;
1077 			counter += oct->instr_queue[q_no]->stats.tx_done;
1078 		}
1079 		break;
1080 	case IFCOUNTER_IBYTES:
1081 		for (i = 0; i < oct->num_oqs; i++) {
1082 			q_no = lio->linfo.rxpciq[i].s.q_no;
1083 			counter += oct->droq[q_no]->stats.rx_bytes_received;
1084 		}
1085 		break;
1086 	case IFCOUNTER_OBYTES:
1087 		for (i = 0; i < oct->num_iqs; i++) {
1088 			q_no = lio->linfo.txpciq[i].s.q_no;
1089 			counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
1090 		}
1091 		break;
1092 	case IFCOUNTER_IQDROPS:
1093 		for (i = 0; i < oct->num_oqs; i++) {
1094 			q_no = lio->linfo.rxpciq[i].s.q_no;
1095 			counter += oct->droq[q_no]->stats.rx_dropped;
1096 		}
1097 		break;
1098 	case IFCOUNTER_OQDROPS:
1099 		for (i = 0; i < oct->num_iqs; i++) {
1100 			q_no = lio->linfo.txpciq[i].s.q_no;
1101 			counter += oct->instr_queue[q_no]->stats.tx_dropped;
1102 		}
1103 		break;
1104 	case IFCOUNTER_IMCASTS:
1105 		counter = oct->link_stats.fromwire.total_mcst;
1106 		break;
1107 	case IFCOUNTER_OMCASTS:
1108 		counter = oct->link_stats.fromhost.mcast_pkts_sent;
1109 		break;
1110 	case IFCOUNTER_COLLISIONS:
1111 		counter = oct->link_stats.fromhost.total_collisions;
1112 		break;
1113 	case IFCOUNTER_IERRORS:
1114 		counter = oct->link_stats.fromwire.fcs_err +
1115 		    oct->link_stats.fromwire.l2_err +
1116 		    oct->link_stats.fromwire.frame_err;
1117 		break;
1118 	default:
1119 		return (if_get_counter_default(ifp, cnt));
1120 	}
1121 
1122 	return (counter);
1123 }
1124 
1125 static int
1126 lio_init_ifnet(struct lio *lio)
1127 {
1128 	struct octeon_device	*oct = lio->oct_dev;
1129 	if_t			ifp = lio->ifp;
1130 
1131 	/* ifconfig entrypoint for media type/status reporting */
1132 	ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
1133 		     lio_ifmedia_status);
1134 
1135 	/* set the default interface values */
1136 	ifmedia_add(&lio->ifmedia,
1137 		    (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
1138 		    0, NULL);
1139 	ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
1140 	ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));
1141 
1142 	lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
1143 	lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);
1144 
1145 	if_initname(ifp, device_get_name(oct->device),
1146 		    device_get_unit(oct->device));
1147 	if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
1148 	if_setioctlfn(ifp, lio_ioctl);
1149 	if_setgetcounterfn(ifp, lio_get_counter);
1150 	if_settransmitfn(ifp, lio_mq_start);
1151 	if_setqflushfn(ifp, lio_qflush);
1152 	if_setinitfn(ifp, lio_open);
1153 	if_setmtu(ifp, lio->linfo.link.s.mtu);
1154 	lio->mtu = lio->linfo.link.s.mtu;
1155 	if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1156 			     CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
1157 
1158 	if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
1159 				    IFCAP_TSO | IFCAP_LRO |
1160 				    IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
1161 				    IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
1162 				    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
1163 				    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);
1164 
1165 	if_setcapenable(ifp, if_getcapabilities(ifp));
1166 	if_setbaudrate(ifp, lio_get_baudrate(oct));
1167 
1168 	return (0);
1169 }
1170 
1171 static void
1172 lio_tcp_lro_free(struct octeon_device *octeon_dev, if_t ifp)
1173 {
1174 	struct lio	*lio = if_getsoftc(ifp);
1175 	struct lio_droq	*droq;
1176 	int		q_no;
1177 	int		i;
1178 
1179 	for (i = 0; i < octeon_dev->num_oqs; i++) {
1180 		q_no = lio->linfo.rxpciq[i].s.q_no;
1181 		droq = octeon_dev->droq[q_no];
1182 		if (droq->lro.ifp) {
1183 			tcp_lro_free(&droq->lro);
1184 			droq->lro.ifp = NULL;
1185 		}
1186 	}
1187 }
1188 
1189 static int
1190 lio_tcp_lro_init(struct octeon_device *octeon_dev, if_t ifp)
1191 {
1192 	struct lio	*lio = if_getsoftc(ifp);
1193 	struct lio_droq	*droq;
1194 	struct lro_ctrl	*lro;
1195 	int		i, q_no, ret = 0;
1196 
1197 	for (i = 0; i < octeon_dev->num_oqs; i++) {
1198 		q_no = lio->linfo.rxpciq[i].s.q_no;
1199 		droq = octeon_dev->droq[q_no];
1200 		lro = &droq->lro;
1201 		ret = tcp_lro_init(lro);
1202 		if (ret) {
1203 			lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
1204 				    ret);
1205 			goto lro_init_failed;
1206 		}
1207 
1208 		lro->ifp = ifp;
1209 	}
1210 
1211 	return (ret);
1212 
1213 lro_init_failed:
1214 	lio_tcp_lro_free(octeon_dev, ifp);
1215 
1216 	return (ret);
1217 }
1218 
1219 static int
1220 lio_setup_nic_devices(struct octeon_device *octeon_dev)
1221 {
1222 	union		octeon_if_cfg if_cfg;
1223 	struct lio	*lio = NULL;
1224 	if_t		ifp = NULL;
1225 	struct lio_version		*vdata;
1226 	struct lio_soft_command		*sc;
1227 	struct lio_if_cfg_context	*ctx;
1228 	struct lio_if_cfg_resp		*resp;
1229 	struct lio_if_props		*props;
1230 	int		num_iqueues, num_oqueues, retval;
1231 	unsigned int	base_queue;
1232 	unsigned int	gmx_port_id;
1233 	uint32_t	ctx_size, data_size;
1234 	uint32_t	ifidx_or_pfnum, resp_size;
1235 	uint8_t		mac[ETHER_HDR_LEN], i, j;
1236 
1237 	/* This is to handle link status changes */
1238 	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
1239 				 LIO_OPCODE_NIC_INFO,
1240 				 lio_link_info, octeon_dev);
1241 
1242 	for (i = 0; i < octeon_dev->ifcount; i++) {
1243 		resp_size = sizeof(struct lio_if_cfg_resp);
1244 		ctx_size = sizeof(struct lio_if_cfg_context);
1245 		data_size = sizeof(struct lio_version);
1246 		sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
1247 					    ctx_size);
1248 		if (sc == NULL)
1249 			return (ENOMEM);
1250 
1251 		resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1252 		ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1253 		vdata = (struct lio_version *)sc->virtdptr;
1254 
1255 		*((uint64_t *)vdata) = 0;
1256 		vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
1257 		vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
1258 		vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);
1259 
1260 		num_iqueues = octeon_dev->sriov_info.num_pf_rings;
1261 		num_oqueues = octeon_dev->sriov_info.num_pf_rings;
1262 		base_queue = octeon_dev->sriov_info.pf_srn;
1263 
1264 		gmx_port_id = octeon_dev->pf_num;
1265 		ifidx_or_pfnum = octeon_dev->pf_num;
1266 
1267 		lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
1268 			    ifidx_or_pfnum, num_iqueues, num_oqueues);
1269 		ctx->cond = 0;
1270 		ctx->octeon_id = lio_get_device_id(octeon_dev);
1271 
1272 		if_cfg.if_cfg64 = 0;
1273 		if_cfg.s.num_iqueues = num_iqueues;
1274 		if_cfg.s.num_oqueues = num_oqueues;
1275 		if_cfg.s.base_queue = base_queue;
1276 		if_cfg.s.gmx_port_id = gmx_port_id;
1277 
1278 		sc->iq_no = 0;
1279 
1280 		lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
1281 					 LIO_OPCODE_NIC_IF_CFG, 0,
1282 					 if_cfg.if_cfg64, 0);
1283 
1284 		sc->callback = lio_if_cfg_callback;
1285 		sc->callback_arg = sc;
1286 		sc->wait_time = 3000;
1287 
1288 		retval = lio_send_soft_command(octeon_dev, sc);
1289 		if (retval == LIO_IQ_SEND_FAILED) {
1290 			lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
1291 				    retval);
1292 			/* Soft instr is freed by driver in case of failure. */
1293 			goto setup_nic_dev_fail;
1294 		}
1295 
1296 		/*
1297 		 * Sleep on a wait queue until the cond flag indicates that the
1298 		 * response arrived or the request timed out.
1299 		 */
1300 		lio_sleep_cond(octeon_dev, &ctx->cond);
1301 
1302 		retval = resp->status;
1303 		if (retval) {
1304 			lio_dev_err(octeon_dev, "iq/oq config failed\n");
1305 			goto setup_nic_dev_fail;
1306 		}
1307 
1308 		lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1309 				 (sizeof(struct octeon_if_cfg_info)) >> 3);
1310 
1311 		num_iqueues = bitcount64(resp->cfg_info.iqmask);
1312 		num_oqueues = bitcount64(resp->cfg_info.oqmask);
1313 
1314 		if (!(num_iqueues) || !(num_oqueues)) {
1315 			lio_dev_err(octeon_dev,
1316 				    "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
1317 				    LIO_CAST64(resp->cfg_info.iqmask),
1318 				    LIO_CAST64(resp->cfg_info.oqmask));
1319 			goto setup_nic_dev_fail;
1320 		}
1321 
1322 		lio_dev_dbg(octeon_dev,
1323 			    "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
1324 			    i, LIO_CAST64(resp->cfg_info.iqmask),
1325 			    LIO_CAST64(resp->cfg_info.oqmask),
1326 			    num_iqueues, num_oqueues);
1327 
1328 		ifp = if_alloc(IFT_ETHER);
1329 
1330 		if (ifp == NULL) {
1331 			lio_dev_err(octeon_dev, "Device allocation failed\n");
1332 			goto setup_nic_dev_fail;
1333 		}
1334 
1335 		lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);
1336 
1337 		if (lio == NULL) {
1338 			lio_dev_err(octeon_dev, "Lio allocation failed\n");
1339 			goto setup_nic_dev_fail;
1340 		}
1341 
1342 		if_setsoftc(ifp, lio);
1343 
1344 		if_sethwtsomax(ifp, LIO_MAX_FRAME_SIZE);
1345 		if_sethwtsomaxsegcount(ifp, LIO_MAX_SG);
1346 		if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
1347 
1348 		lio->ifidx = ifidx_or_pfnum;
1349 
1350 		props = &octeon_dev->props;
1351 		props->gmxport = resp->cfg_info.linfo.gmxport;
1352 		props->ifp = ifp;
1353 
1354 		lio->linfo.num_rxpciq = num_oqueues;
1355 		lio->linfo.num_txpciq = num_iqueues;
1356 		for (j = 0; j < num_oqueues; j++) {
1357 			lio->linfo.rxpciq[j].rxpciq64 =
1358 			    resp->cfg_info.linfo.rxpciq[j].rxpciq64;
1359 		}
1360 
1361 		for (j = 0; j < num_iqueues; j++) {
1362 			lio->linfo.txpciq[j].txpciq64 =
1363 			    resp->cfg_info.linfo.txpciq[j].txpciq64;
1364 		}
1365 
1366 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1367 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1368 		lio->linfo.link.link_status64 =
1369 		    resp->cfg_info.linfo.link.link_status64;
1370 
1371 		/*
1372 		 * Point to the properties of the octeon device to which this
1373 		 * interface belongs.
1374 		 */
1375 		lio->oct_dev = octeon_dev;
1376 		lio->ifp = ifp;
1377 
1378 		lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
1379 			    lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
1380 		lio_init_ifnet(lio);
1381 		/* 64-bit swap required on LE machines */
1382 		lio_swap_8B_data(&lio->linfo.hw_addr, 1);
1383 		for (j = 0; j < 6; j++)
1384 			mac[j] = *((uint8_t *)(
1385 				   ((uint8_t *)&lio->linfo.hw_addr) + 2 + j));
1386 
1387 		ether_ifattach(ifp, mac);
1388 
1389 		/*
1390 		 * By default all interfaces on a single Octeon use the same
1391 		 * tx and rx queues.
1392 		 */
1393 		lio->txq = lio->linfo.txpciq[0].s.q_no;
1394 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1395 		if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
1396 					lio->linfo.num_rxpciq)) {
1397 			lio_dev_err(octeon_dev, "I/O queues creation failed\n");
1398 			goto setup_nic_dev_fail;
1399 		}
1400 
1401 		lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
1402 
1403 		lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
1404 		lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);
1405 
1406 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
1407 			lio_dev_err(octeon_dev, "Gather list allocation failed\n");
1408 			goto setup_nic_dev_fail;
1409 		}
1410 
1411 		if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
1412 			goto setup_nic_dev_fail;
1413 
1414 		if (lio_hwlro &&
1415 		    (if_getcapenable(ifp) & IFCAP_LRO) &&
1416 		    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
1417 		    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
1418 			lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
1419 					LIO_LROIPV4 | LIO_LROIPV6);
1420 
1421 		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
1422 			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
1423 		else
1424 			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);
1425 
1426 		if (lio_setup_rx_oom_poll_fn(ifp))
1427 			goto setup_nic_dev_fail;
1428 
1429 		lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
1430 			    i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
1431 		lio->link_changes++;
1432 
1433 		lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);
1434 
1435 		/*
1436 		 * Send a command to the firmware to enable Rx checksum
1437 		 * offload by default when the LiquidIO driver is set up for
1438 		 * this device.
1439 		 */
1440 		lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
1441 				       LIO_CMD_RXCSUM_ENABLE);
1442 		lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
1443 				LIO_CMD_TXCSUM_ENABLE);
1444 
1445 #ifdef RSS
1446 		if (lio_rss) {
1447 			if (lio_send_rss_param(lio))
1448 				goto setup_nic_dev_fail;
1449 		} else
1450 #endif	/* RSS */
1451 
1452 			lio_set_feature(ifp, LIO_CMD_SET_FNV,
1453 					LIO_CMD_FNV_ENABLE);
1454 
1455 		lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);
1456 
1457 		lio_free_soft_command(octeon_dev, sc);
1458 		lio->vlan_attach =
1459 		    EVENTHANDLER_REGISTER(vlan_config,
1460 					  lio_vlan_rx_add_vid, lio,
1461 					  EVENTHANDLER_PRI_FIRST);
1462 		lio->vlan_detach =
1463 		    EVENTHANDLER_REGISTER(vlan_unconfig,
1464 					  lio_vlan_rx_kill_vid, lio,
1465 					  EVENTHANDLER_PRI_FIRST);
1466 
1467 		/* Update stats periodically */
1468 		callout_init(&lio->stats_timer, 0);
1469 		lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;
1470 
1471 		lio_add_hw_stats(lio);
1472 	}
1473 
1474 	return (0);
1475 
1476 setup_nic_dev_fail:
1477 
1478 	lio_free_soft_command(octeon_dev, sc);
1479 
1480 	while (i--) {
1481 		lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
1482 		lio_destroy_nic_device(octeon_dev, i);
1483 	}
1484 
1485 	return (ENODEV);
1486 }
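
/*
 * The IF_CFG exchange above is effectively synchronous: ctx->cond is
 * cleared before the soft command is queued, lio_if_cfg_callback() sets
 * it to 1 once the response has been written back, and lio_sleep_cond()
 * waits on that flag (or a timeout) before the response fields are
 * parsed and the ifnet is created.
 */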
1487 
1488 static int
1489 lio_link_info(struct lio_recv_info *recv_info, void *ptr)
1490 {
1491 	struct octeon_device	*oct = (struct octeon_device *)ptr;
1492 	struct lio_recv_pkt	*recv_pkt = recv_info->recv_pkt;
1493 	union octeon_link_status *ls;
1494 	int	gmxport = 0, i;
1495 
1496 	lio_dev_dbg(oct, "%s Called\n", __func__);
1497 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
1498 		lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1499 			    recv_pkt->buffer_size[0],
1500 			    recv_pkt->rh.r_nic_info.gmxport);
1501 		goto nic_info_err;
1502 	}
1503 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
1504 	ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
1505 					  LIO_DROQ_INFO_SIZE);
1506 	lio_swap_8B_data((uint64_t *)ls,
1507 			 (sizeof(union octeon_link_status)) >> 3);
1508 
1509 	if (oct->props.gmxport == gmxport)
1510 		lio_update_link_status(oct->props.ifp, ls);
1511 
1512 nic_info_err:
1513 	for (i = 0; i < recv_pkt->buffer_count; i++)
1514 		lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);
1515 
1516 	lio_free_recv_info(recv_info);
1517 	return (0);
1518 }
1519 
1520 void
1521 lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1522 {
1523 
1524 	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1525 	bus_dmamap_unload(iq->txtag, finfo->map);
1526 	m_freem(finfo->mb);
1527 }
1528 
1529 void
1530 lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1531 {
1532 	struct lio_gather	*g;
1533 	struct octeon_device	*oct;
1534 	struct lio		*lio;
1535 	int	iq_no;
1536 
1537 	g = finfo->g;
1538 	iq_no = iq->txpciq.s.q_no;
1539 	oct = iq->oct_dev;
1540 	lio = if_getsoftc(oct->props.ifp);
1541 
1542 	mtx_lock(&lio->glist_lock[iq_no]);
1543 	STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
1544 	mtx_unlock(&lio->glist_lock[iq_no]);
1545 
1546 	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1547 	bus_dmamap_unload(iq->txtag, finfo->map);
1548 	m_freem(finfo->mb);
1549 }
1550 
1551 static void
1552 lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
1553 {
1554 	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
1555 	struct lio_if_cfg_resp	*resp;
1556 	struct lio_if_cfg_context *ctx;
1557 
1558 	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1559 	ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1560 
1561 	oct = lio_get_device(ctx->octeon_id);
1562 	if (resp->status)
1563 		lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
1564 			    LIO_CAST64(resp->status), status);
1565 	ctx->cond = 1;
1566 
1567 	snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
1568 		 resp->cfg_info.lio_firmware_version);
1569 
1570 	/*
1571 	 * This barrier is required to be sure that the response has been
1572 	 * written fully before waking up the handler
1573 	 */
1574 	wmb();
1575 }
1576 
1577 static int
1578 lio_is_mac_changed(uint8_t *new, uint8_t *old)
1579 {
1580 
1581 	return ((new[0] != old[0]) || (new[1] != old[1]) ||
1582 		(new[2] != old[2]) || (new[3] != old[3]) ||
1583 		(new[4] != old[4]) || (new[5] != old[5]));
1584 }
1585 
1586 void
1587 lio_open(void *arg)
1588 {
1589 	struct lio	*lio = arg;
1590 	if_t		ifp = lio->ifp;
1591 	struct octeon_device	*oct = lio->oct_dev;
1592 	uint8_t	*mac_new, mac_old[ETHER_HDR_LEN];
1593 	int	ret = 0;
1594 
1595 	lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);
1596 
1597 	/* Ready for link status updates */
1598 	lio->intf_open = 1;
1599 
1600 	lio_dev_info(oct, "Interface Open, ready for traffic\n");
1601 
1602 	/* tell Octeon to start forwarding packets to host */
1603 	lio_send_rx_ctrl_cmd(lio, 1);
1604 
1605 	mac_new = if_getlladdr(ifp);
1606 	memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN);
1607 
1608 	if (lio_is_mac_changed(mac_new, mac_old)) {
1609 		ret = lio_set_mac(ifp, mac_new);
1610 		if (ret)
1611 			lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
1612 	}
1613 
1614 	/* Now inform the stack we're ready */
1615 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1616 
1617 	lio_dev_info(oct, "Interface is opened\n");
1618 }
1619 
1620 static int
1621 lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd)
1622 {
1623 	struct lio_ctrl_pkt	nctrl;
1624 	struct lio		*lio = if_getsoftc(ifp);
1625 	struct octeon_device	*oct = lio->oct_dev;
1626 	int	ret = 0;
1627 
1628 	nctrl.ncmd.cmd64 = 0;
1629 	nctrl.ncmd.s.cmd = command;
1630 	nctrl.ncmd.s.param1 = rx_cmd;
1631 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1632 	nctrl.wait_time = 100;
1633 	nctrl.lio = lio;
1634 	nctrl.cb_fn = lio_ctrl_cmd_completion;
1635 
1636 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
1637 	if (ret < 0) {
1638 		lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
1639 			    ret);
1640 	}
1641 
1642 	return (ret);
1643 }
1644 
1645 static int
1646 lio_stop_nic_module(struct octeon_device *oct)
1647 {
1648 	int		i, j;
1649 	struct lio	*lio;
1650 
1651 	lio_dev_dbg(oct, "Stopping network interfaces\n");
1652 	if (!oct->ifcount) {
1653 		lio_dev_err(oct, "Init for Octeon was not completed\n");
1654 		return (1);
1655 	}
1656 
1657 	mtx_lock(&oct->cmd_resp_wqlock);
1658 	oct->cmd_resp_state = LIO_DRV_OFFLINE;
1659 	mtx_unlock(&oct->cmd_resp_wqlock);
1660 
1661 	for (i = 0; i < oct->ifcount; i++) {
1662 		lio = if_getsoftc(oct->props.ifp);
1663 		for (j = 0; j < oct->num_oqs; j++)
1664 			lio_unregister_droq_ops(oct,
1665 						lio->linfo.rxpciq[j].s.q_no);
1666 	}
1667 
1668 	callout_drain(&lio->stats_timer);
1669 
1670 	for (i = 0; i < oct->ifcount; i++)
1671 		lio_destroy_nic_device(oct, i);
1672 
1673 	lio_dev_dbg(oct, "Network interface stopped\n");
1674 
1675 	return (0);
1676 }
1677 
1678 static void
1679 lio_delete_glists(struct octeon_device *oct, struct lio *lio)
1680 {
1681 	struct lio_gather	*g;
1682 	int	i;
1683 
1684 	if (lio->glist_lock != NULL) {
1685 		free((void *)lio->glist_lock, M_DEVBUF);
1686 		lio->glist_lock = NULL;
1687 	}
1688 
1689 	if (lio->ghead == NULL)
1690 		return;
1691 
1692 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
1693 		do {
1694 			g = (struct lio_gather *)
1695 			    lio_delete_first_node(&lio->ghead[i]);
1696 			free(g, M_DEVBUF);
1697 		} while (g);
1698 
1699 		if ((lio->glists_virt_base != NULL) &&
1700 		    (lio->glists_virt_base[i] != NULL)) {
1701 			lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
1702 				     lio->glists_virt_base[i]);
1703 		}
1704 	}
1705 
1706 	free(lio->glists_virt_base, M_DEVBUF);
1707 	lio->glists_virt_base = NULL;
1708 
1709 	free(lio->glists_dma_base, M_DEVBUF);
1710 	lio->glists_dma_base = NULL;
1711 
1712 	free(lio->ghead, M_DEVBUF);
1713 	lio->ghead = NULL;
1714 }
1715 
1716 static int
1717 lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
1718 {
1719 	struct lio_gather	*g;
1720 	int	i, j;
1721 
1722 	lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
1723 				 M_NOWAIT | M_ZERO);
1724 	if (lio->glist_lock == NULL)
1725 		return (1);
1726 
1727 	lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
1728 			    M_NOWAIT | M_ZERO);
1729 	if (lio->ghead == NULL) {
1730 		free((void *)lio->glist_lock, M_DEVBUF);
1731 		lio->glist_lock = NULL;
1732 		return (1);
1733 	}
1734 
1735 	lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
1736 					 LIO_SG_ENTRY_SIZE);
1737 	/*
1738 	 * allocate memory to store virtual and dma base address of
1739 	 * per glist consistent memory
1740 	 */
1741 	lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
1742 				       M_NOWAIT | M_ZERO);
1743 	lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
1744 				      M_NOWAIT | M_ZERO);
1745 	if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
1746 		lio_delete_glists(oct, lio);
1747 		return (1);
1748 	}
1749 
1750 	for (i = 0; i < num_iqs; i++) {
1751 		mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);
1752 
1753 		STAILQ_INIT(&lio->ghead[i]);
1754 
1755 		lio->glists_virt_base[i] =
1756 		    lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
1757 				  (vm_paddr_t *)&lio->glists_dma_base[i]);
1758 		if (lio->glists_virt_base[i] == NULL) {
1759 			lio_delete_glists(oct, lio);
1760 			return (1);
1761 		}
1762 
1763 		for (j = 0; j < lio->tx_qsize; j++) {
1764 			g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
1765 			if (g == NULL)
1766 				break;
1767 
1768 			g->sg = (struct lio_sg_entry *)(uintptr_t)
1769 			    ((uint64_t)(uintptr_t)lio->glists_virt_base[i] +
1770 			     (j * lio->glist_entry_size));
1771 			g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
1772 				(j * lio->glist_entry_size);
1773 			STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
1774 		}
1775 
1776 		if (j != lio->tx_qsize) {
1777 			lio_delete_glists(oct, lio);
1778 			return (1);
1779 		}
1780 	}
1781 
1782 	return (0);
1783 }
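
/*
 * Gather-list layout (summary): for every IQ one DMA-coherent region of
 * tx_qsize * glist_entry_size bytes is allocated; slot j of that region
 * backs one struct lio_gather, whose 'sg' and 'sg_dma_ptr' members point
 * at the virtual and physical address of that slot.  Free lio_gather
 * nodes live on the per-queue 'ghead' STAILQ protected by 'glist_lock',
 * and lio_free_sgmbuf() returns them there once transmission completes.
 */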
1784 
1785 void
1786 lio_stop(if_t ifp)
1787 {
1788 	struct lio	*lio = if_getsoftc(ifp);
1789 	struct octeon_device	*oct = lio->oct_dev;
1790 
1791 	lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1792 	if_link_state_change(ifp, LINK_STATE_DOWN);
1793 
1794 	lio->intf_open = 0;
1795 	lio->linfo.link.s.link_up = 0;
1796 	lio->link_changes++;
1797 
1798 	lio_send_rx_ctrl_cmd(lio, 0);
1799 
1800 	/* Tell the stack that the interface is no longer active */
1801 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1802 
1803 	lio_dev_info(oct, "Interface is stopped\n");
1804 }
1805 
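/*
 * \brief Refill receive queues that are running low on descriptors
 * @param lio per-network private data
 *
 * For every output queue whose credit count has dropped to 0x40 or
 * below, refill the ring and post the new credits to hardware.
 */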
1806 static void
1807 lio_check_rx_oom_status(struct lio *lio)
1808 {
1809 	struct lio_droq	*droq;
1810 	struct octeon_device *oct = lio->oct_dev;
1811 	int	desc_refilled;
1812 	int	q, q_no = 0;
1813 
1814 	for (q = 0; q < oct->num_oqs; q++) {
1815 		q_no = lio->linfo.rxpciq[q].s.q_no;
1816 		droq = oct->droq[q_no];
1817 		if (droq == NULL)
1818 			continue;
1819 		if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
1820 			mtx_lock(&droq->lock);
1821 			desc_refilled = lio_droq_refill(oct, droq);
1822 			/*
1823 			 * Flush the droq descriptor data to memory to be sure
1824 			 * that when we update the credits the data in memory
1825 			 * is accurate.
1826 			 */
1827 			wmb();
1828 			lio_write_csr32(oct, droq->pkts_credit_reg,
1829 					desc_refilled);
1830 			/* make sure mmio write completes */
1831 			__compiler_membar();
1832 			mtx_unlock(&droq->lock);
1833 		}
1834 	}
1835 }
1836 
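/*
 * \brief Periodic task that checks receive queues for descriptor shortage
 * @param arg lio_tq that owns this task
 * @param pending unused
 *
 * Checks the queues only while the interface is running and re-arms
 * itself every 50 milliseconds.
 */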
1837 static void
1838 lio_poll_check_rx_oom_status(void *arg, int pending __unused)
1839 {
1840 	struct lio_tq	*rx_status_tq = arg;
1841 	struct lio	*lio = rx_status_tq->ctxptr;
1842 
1843 	if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
1844 		lio_check_rx_oom_status(lio);
1845 
1846 	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1847 				  lio_ms_to_ticks(50));
1848 }
1849 
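/*
 * \brief Create the taskqueue that polls receive queues for descriptor shortage
 * @param ifp network interface
 * @returns 0 on success, -1 if the taskqueue could not be created
 */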
1850 static int
1851 lio_setup_rx_oom_poll_fn(if_t ifp)
1852 {
1853 	struct lio	*lio = if_getsoftc(ifp);
1854 	struct octeon_device	*oct = lio->oct_dev;
1855 	struct lio_tq	*rx_status_tq;
1856 
1857 	rx_status_tq = &lio->rx_status_tq;
1858 
1859 	rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
1860 					    taskqueue_thread_enqueue,
1861 					    &rx_status_tq->tq);
1862 	if (rx_status_tq->tq == NULL) {
1863 		lio_dev_err(oct, "unable to create lio rx oom status tq\n");
1864 		return (-1);
1865 	}
1866 
1867 	TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
1868 			  lio_poll_check_rx_oom_status, (void *)rx_status_tq);
1869 
1870 	rx_status_tq->ctxptr = lio;
1871 
1872 	taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
1873 				"lio%d_rx_oom_status",
1874 				oct->octeon_id);
1875 
1876 	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1877 				  lio_ms_to_ticks(50));
1878 
1879 	return (0);
1880 }
1881 
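/*
 * \brief Cancel and free the receive out-of-memory poll taskqueue
 * @param ifp network interface
 */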
1882 static void
1883 lio_cleanup_rx_oom_poll_fn(if_t ifp)
1884 {
1885 	struct lio	*lio = if_getsoftc(ifp);
1886 
1887 	if (lio->rx_status_tq.tq != NULL) {
1888 		while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
1889 						&lio->rx_status_tq.work, NULL))
1890 			taskqueue_drain_timeout(lio->rx_status_tq.tq,
1891 						&lio->rx_status_tq.work);
1892 
1893 		taskqueue_free(lio->rx_status_tq.tq);
1894 
1895 		lio->rx_status_tq.tq = NULL;
1896 	}
1897 }
1898 
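/*
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Stops the interface, drains pending requests and packets, detaches it
 * from the network stack and releases the per-interface resources.
 */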
1899 static void
1900 lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1901 {
1902 	if_t		ifp = oct->props.ifp;
1903 	struct lio	*lio;
1904 
1905 	if (ifp == NULL) {
1906 		lio_dev_err(oct, "%s No ifp ptr for index %d\n",
1907 			    __func__, ifidx);
1908 		return;
1909 	}
1910 
1911 	lio = if_getsoftc(ifp);
1912 
1913 	lio_ifstate_set(lio, LIO_IFSTATE_DETACH);
1914 
1915 	lio_dev_dbg(oct, "NIC device cleanup\n");
1916 
1917 	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1918 		lio_stop(ifp);
1919 
1920 	if (lio_wait_for_pending_requests(oct))
1921 		lio_dev_err(oct, "There were pending requests\n");
1922 
1923 	if (lio_wait_for_instr_fetch(oct))
1924 		lio_dev_err(oct, "IQ had pending instructions\n");
1925 
1926 	if (lio_wait_for_oq_pkts(oct))
1927 		lio_dev_err(oct, "OQ had pending packets\n");
1928 
1929 	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1930 		ether_ifdetach(ifp);
1931 
1932 	lio_tcp_lro_free(oct, ifp);
1933 
1934 	lio_cleanup_rx_oom_poll_fn(ifp);
1935 
1936 	lio_delete_glists(oct, lio);
1937 
1938 	EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
1939 	EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);
1940 
1941 	free(lio, M_DEVBUF);
1942 
1943 	if_free(ifp);
1944 
1945 	oct->props.gmxport = -1;
1946 
1947 	oct->props.ifp = NULL;
1948 }
1949 
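/*
 * \brief Print link information
 * @param ifp network interface
 */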
1950 static void
1951 print_link_info(if_t ifp)
1952 {
1953 	struct lio	*lio = if_getsoftc(ifp);
1954 
1955 	if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
1956 	    lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
1957 		struct octeon_link_info *linfo = &lio->linfo;
1958 
1959 		if (linfo->link.s.link_up) {
1960 			lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
1961 				     linfo->link.s.speed,
1962 				     (linfo->link.s.duplex) ? "Full" : "Half");
1963 		} else {
1964 			lio_dev_info(lio->oct_dev, "Link Down\n");
1965 		}
1966 	}
1967 }
1968 
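/*
 * \brief Update link status
 * @param ifp network interface
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application
 * to update the interface's cached link state.
 */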
1969 static inline void
1970 lio_update_link_status(if_t ifp, union octeon_link_status *ls)
1971 {
1972 	struct lio	*lio = if_getsoftc(ifp);
1973 	int	changed = (lio->linfo.link.link_status64 != ls->link_status64);
1974 
1975 	lio->linfo.link.link_status64 = ls->link_status64;
1976 
1977 	if ((lio->intf_open) && (changed)) {
1978 		print_link_info(ifp);
1979 		lio->link_changes++;
1980 		if (lio->linfo.link.s.link_up)
1981 			if_link_state_change(ifp, LINK_STATE_UP);
1982 		else
1983 			if_link_state_change(ifp, LINK_STATE_DOWN);
1984 	}
1985 }
1986 
1987 /*
1988  * \brief Callback for rx ctrl
1989  * @param status status of request
1990  * @param buf pointer to resp structure
1991  */
1992 static void
1993 lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
1994 {
1995 	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
1996 	struct lio_rx_ctl_context *ctx;
1997 
1998 	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
1999 
2000 	oct = lio_get_device(ctx->octeon_id);
2001 	if (status)
2002 		lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
2003 			    LIO_CAST64(status));
2004 	ctx->cond = 1;
2005 
2006 	/*
2007 	 * This barrier is required to be sure that the response has been
2008 	 * written fully before waking up the handler
2009 	 */
2010 	wmb();
2011 }
2012 
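/*
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 *
 * Issues a LIO_CMD_RX_CTL soft command and sleeps until the callback
 * signals completion before updating the cached RX state.
 */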
2013 static void
2014 lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
2015 {
2016 	struct lio_soft_command	*sc;
2017 	struct lio_rx_ctl_context *ctx;
2018 	union octeon_cmd	*ncmd;
2019 	struct octeon_device	*oct = (struct octeon_device *)lio->oct_dev;
2020 	int	ctx_size = sizeof(struct lio_rx_ctl_context);
2021 	int	retval;
2022 
2023 	if (oct->props.rx_on == start_stop)
2024 		return;
2025 
2026 	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
2027 	if (sc == NULL)
2028 		return;
2029 
2030 	ncmd = (union octeon_cmd *)sc->virtdptr;
2031 	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2032 
2033 	ctx->cond = 0;
2034 	ctx->octeon_id = lio_get_device_id(oct);
2035 	ncmd->cmd64 = 0;
2036 	ncmd->s.cmd = LIO_CMD_RX_CTL;
2037 	ncmd->s.param1 = start_stop;
2038 
2039 	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));
2040 
2041 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2042 
2043 	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
2044 				 0, 0);
2045 
2046 	sc->callback = lio_rx_ctl_callback;
2047 	sc->callback_arg = sc;
2048 	sc->wait_time = 5000;
2049 
2050 	retval = lio_send_soft_command(oct, sc);
2051 	if (retval == LIO_IQ_SEND_FAILED) {
2052 		lio_dev_err(oct, "Failed to send RX Control message\n");
2053 	} else {
2054 		/*
2055 		 * Sleep on a wait queue till the cond flag indicates that the
2056 		 * response arrived or timed-out.
2057 		 */
2058 		lio_sleep_cond(oct, &ctx->cond);
2059 		oct->props.rx_on = start_stop;
2060 	}
2061 
2062 	lio_free_soft_command(oct, sc);
2063 }
2064 
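/*
 * \brief VLAN config event handler; adds a VLAN filter in the firmware
 * @param arg softc registered with the event handler
 * @param ifp network interface the VLAN was configured on
 * @param vid VLAN id to add
 */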
2065 static void
2066 lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid)
2067 {
2068 	struct lio_ctrl_pkt	nctrl;
2069 	struct lio		*lio = if_getsoftc(ifp);
2070 	struct octeon_device	*oct = lio->oct_dev;
2071 	int	ret = 0;
2072 
2073 	if (if_getsoftc(ifp) != arg)	/* Not our event */
2074 		return;
2075 
2076 	if ((vid == 0) || (vid > 4095))	/* Invalid */
2077 		return;
2078 
2079 	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2080 
2081 	nctrl.ncmd.cmd64 = 0;
2082 	nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
2083 	nctrl.ncmd.s.param1 = vid;
2084 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2085 	nctrl.wait_time = 100;
2086 	nctrl.lio = lio;
2087 	nctrl.cb_fn = lio_ctrl_cmd_completion;
2088 
2089 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2090 	if (ret < 0) {
2091 		lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
2092 			    ret);
2093 	}
2094 }
2095 
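/*
 * \brief VLAN unconfig event handler; removes a VLAN filter from the firmware
 * @param arg softc registered with the event handler
 * @param ifp network interface the VLAN was removed from
 * @param vid VLAN id to remove
 */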
2096 static void
2097 lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid)
2098 {
2099 	struct lio_ctrl_pkt	nctrl;
2100 	struct lio		*lio = if_getsoftc(ifp);
2101 	struct octeon_device	*oct = lio->oct_dev;
2102 	int	ret = 0;
2103 
2104 	if (if_getsoftc(ifp) != arg)	/* Not our event */
2105 		return;
2106 
2107 	if ((vid == 0) || (vid > 4095))	/* Invalid */
2108 		return;
2109 
2110 	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2111 
2112 	nctrl.ncmd.cmd64 = 0;
2113 	nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
2114 	nctrl.ncmd.s.param1 = vid;
2115 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2116 	nctrl.wait_time = 100;
2117 	nctrl.lio = lio;
2118 	nctrl.cb_fn = lio_ctrl_cmd_completion;
2119 
2120 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2121 	if (ret < 0) {
2122 		lio_dev_err(oct,
2123 			    "Kill VLAN filter failed in core (ret: 0x%x)\n",
2124 			    ret);
2125 	}
2126 }
2127 
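/*
 * \brief Wait for pending output queue packets to be processed
 * @param oct octeon device
 *
 * Polls every active output queue, scheduling its task to drain any
 * packets found, and retries up to 100 times with a short sleep
 * between passes.
 */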
2128 static int
2129 lio_wait_for_oq_pkts(struct octeon_device *oct)
2130 {
2131 	int	i, pending_pkts, pkt_cnt = 0, retry = 100;
2132 
2133 	do {
2134 		pending_pkts = 0;
2135 
2136 		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2137 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2138 				continue;
2139 
2140 			pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
2141 			if (pkt_cnt > 0) {
2142 				pending_pkts += pkt_cnt;
2143 				taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
2144 						  &oct->droq[i]->droq_task);
2145 			}
2146 		}
2147 
2148 		pkt_cnt = 0;
2149 		lio_sleep_timeout(1);
2150 	} while (retry-- && pending_pkts);
2151 
2152 	return (pkt_cnt);
2153 }
2154 
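/*
 * \brief Destroy resources associated with the octeon device
 * @param oct octeon device
 *
 * Tears down interrupts, queues, dispatch lists and PCI mappings by
 * falling through the state machine from the device's current state
 * down to LIO_DEV_BEGIN_STATE.
 */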
2155 static void
2156 lio_destroy_resources(struct octeon_device *oct)
2157 {
2158 	int i, refcount;
2159 
2160 	switch (atomic_load_acq_int(&oct->status)) {
2161 	case LIO_DEV_RUNNING:
2162 	case LIO_DEV_CORE_OK:
2163 		/* No more instructions will be forwarded. */
2164 		atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);
2165 
2166 		oct->app_mode = LIO_DRV_INVALID_APP;
2167 		lio_dev_dbg(oct, "Device state is now %s\n",
2168 			    lio_get_state_string(&oct->status));
2169 
2170 		lio_sleep_timeout(100);
2171 
2172 		/* fallthrough */
2173 	case LIO_DEV_HOST_OK:
2174 
2175 		/* fallthrough */
2176 	case LIO_DEV_CONSOLE_INIT_DONE:
2177 		/* Remove any consoles */
2178 		lio_remove_consoles(oct);
2179 
2180 		/* fallthrough */
2181 	case LIO_DEV_IO_QUEUES_DONE:
2182 		if (lio_wait_for_pending_requests(oct))
2183 			lio_dev_err(oct, "There were pending requests\n");
2184 
2185 		if (lio_wait_for_instr_fetch(oct))
2186 			lio_dev_err(oct, "IQ had pending instructions\n");
2187 
2188 		/*
2189 		 * Disable the input and output queues now. No more packets will
2190 		 * arrive from Octeon, but we should wait for all packet
2191 		 * processing to finish.
2192 		 */
2193 		oct->fn_list.disable_io_queues(oct);
2194 
2195 		if (lio_wait_for_oq_pkts(oct))
2196 			lio_dev_err(oct, "OQ had pending packets\n");
2197 
2198 		/* fallthrough */
2199 	case LIO_DEV_INTR_SET_DONE:
2200 		/* Disable interrupts  */
2201 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
2202 
2203 		if (oct->msix_on) {
2204 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
2205 				if (oct->ioq_vector[i].tag != NULL) {
2206 					bus_teardown_intr(oct->device,
2207 						  oct->ioq_vector[i].msix_res,
2208 						      oct->ioq_vector[i].tag);
2209 					oct->ioq_vector[i].tag = NULL;
2210 				}
2211 				if (oct->ioq_vector[i].msix_res != NULL) {
2212 					bus_release_resource(oct->device,
2213 						SYS_RES_IRQ,
2214 						oct->ioq_vector[i].vector,
2215 						oct->ioq_vector[i].msix_res);
2216 					oct->ioq_vector[i].msix_res = NULL;
2217 				}
2218 			}
2219 			/* The non-IOQ (aux) vector's interrupt argument is the oct struct */
2220 			if (oct->tag != NULL) {
2221 				bus_teardown_intr(oct->device, oct->msix_res,
2222 						  oct->tag);
2223 				oct->tag = NULL;
2224 			}
2225 
2226 			if (oct->msix_res != NULL) {
2227 				bus_release_resource(oct->device, SYS_RES_IRQ,
2228 						     oct->aux_vector,
2229 						     oct->msix_res);
2230 				oct->msix_res = NULL;
2231 			}
2232 
2233 			pci_release_msi(oct->device);
2234 		}
2235 		/* fallthrough */
2236 	case LIO_DEV_IN_RESET:
2237 	case LIO_DEV_DROQ_INIT_DONE:
2238 		/* Wait for any pending operations */
2239 		lio_mdelay(100);
2240 		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2241 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2242 				continue;
2243 			lio_delete_droq(oct, i);
2244 		}
2245 
2246 		/* fallthrough */
2247 	case LIO_DEV_RESP_LIST_INIT_DONE:
2248 		for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
2249 			if (oct->droq[i] != NULL) {
2250 				free(oct->droq[i], M_DEVBUF);
2251 				oct->droq[i] = NULL;
2252 			}
2253 		}
2254 		lio_delete_response_list(oct);
2255 
2256 		/* fallthrough */
2257 	case LIO_DEV_INSTR_QUEUE_INIT_DONE:
2258 		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
2259 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
2260 				continue;
2261 
2262 			lio_delete_instr_queue(oct, i);
2263 		}
2264 
2265 		/* fallthrough */
2266 	case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
2267 		for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
2268 			if (oct->instr_queue[i] != NULL) {
2269 				free(oct->instr_queue[i], M_DEVBUF);
2270 				oct->instr_queue[i] = NULL;
2271 			}
2272 		}
2273 		lio_free_ioq_vector(oct);
2274 
2275 		/* fallthrough */
2276 	case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
2277 		lio_free_sc_buffer_pool(oct);
2278 
2279 		/* fallthrough */
2280 	case LIO_DEV_DISPATCH_INIT_DONE:
2281 		lio_delete_dispatch_list(oct);
2282 
2283 		/* fallthrough */
2284 	case LIO_DEV_PCI_MAP_DONE:
2285 		refcount = lio_deregister_device(oct);
2286 
2287 		if (fw_type_is_none())
2288 			lio_pci_flr(oct);
2289 
2290 		if (!refcount)
2291 			oct->fn_list.soft_reset(oct);
2292 
2293 		lio_unmap_pci_barx(oct, 0);
2294 		lio_unmap_pci_barx(oct, 1);
2295 
2296 		/* fallthrough */
2297 	case LIO_DEV_PCI_ENABLE_DONE:
2298 		/* Disable the device, releasing the PCI INT */
2299 		pci_disable_busmaster(oct->device);
2300 
2301 		/* fallthrough */
2302 	case LIO_DEV_BEGIN_STATE:
2303 		break;
2304 	}	/* end switch (oct->status) */
2305 }
2306