// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Helper functions for common, but complicated tasks.
 */

#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-asxx-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-dbg-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-l2c-defs.h>
#include <mach/cvmx-npi-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-smix-defs.h>
#include <mach/cvmx-sriox-defs.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-ipd.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
#include <mach/cvmx-helper-pko.h>
#include <mach/cvmx-helper-pko3.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-gmx.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pip.h>

/**
 * @INTERNAL
 * This structure specifies the interface methods used by an interface.
 *
 * @param mode		Interface mode.
 *
 * @param enumerate	Method to get the number of interface ports.
 *
 * @param probe		Method to probe an interface to get the number of
 *			connected ports.
 *
 * @param enable	Method to enable an interface
 *
 * @param link_get	Method to get the state of an interface link.
 *
 * @param link_set	Method to configure an interface link to the specified
 *			state.
 *
 * @param loopback	Method to configure a port in loopback.
 */
struct iface_ops {
	cvmx_helper_interface_mode_t mode;
	int (*enumerate)(int xiface);
	int (*probe)(int xiface);
	int (*enable)(int xiface);
	cvmx_helper_link_info_t (*link_get)(int ipd_port);
	int (*link_set)(int ipd_port, cvmx_helper_link_info_t link_info);
	int (*loopback)(int ipd_port, int en_in, int en_ex);
};
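
/*
 * Illustrative sketch (not part of this driver): how a caller might
 * dispatch through an iface_ops entry. Guarding each method pointer
 * before calling it mirrors what the helpers below do; the variable
 * names here are hypothetical.
 *
 *	const struct iface_ops *ops = iface_ops[interface];
 *	int nports = 0;
 *
 *	if (ops->probe)
 *		nports = ops->probe(xiface);
 *	if (nports > 0 && ops->enable)
 *		ops->enable(xiface);
 */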

/**
 * @INTERNAL
 * This structure is used by disabled interfaces.
 */
static const struct iface_ops iface_ops_dis = {
	.mode = CVMX_HELPER_INTERFACE_MODE_DISABLED,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as gmii.
 */
static const struct iface_ops iface_ops_gmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_GMII,
	.enumerate = __cvmx_helper_rgmii_probe,
	.probe = __cvmx_helper_rgmii_probe,
	.enable = __cvmx_helper_rgmii_enable,
	.link_get = __cvmx_helper_gmii_link_get,
	.link_set = __cvmx_helper_rgmii_link_set,
	.loopback = __cvmx_helper_rgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rgmii.
 */
static const struct iface_ops iface_ops_rgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RGMII,
	.enumerate = __cvmx_helper_rgmii_probe,
	.probe = __cvmx_helper_rgmii_probe,
	.enable = __cvmx_helper_rgmii_enable,
	.link_get = __cvmx_helper_rgmii_link_get,
	.link_set = __cvmx_helper_rgmii_link_set,
	.loopback = __cvmx_helper_rgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as sgmii that use the gmx mac.
 */
static const struct iface_ops iface_ops_sgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
	.enumerate = __cvmx_helper_sgmii_enumerate,
	.probe = __cvmx_helper_sgmii_probe,
	.enable = __cvmx_helper_sgmii_enable,
	.link_get = __cvmx_helper_sgmii_link_get,
	.link_set = __cvmx_helper_sgmii_link_set,
	.loopback = __cvmx_helper_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as sgmii that use the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_sgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_sgmii_enable,
	.link_get = __cvmx_helper_bgx_sgmii_link_get,
	.link_set = __cvmx_helper_bgx_sgmii_link_set,
	.loopback = __cvmx_helper_bgx_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as qsgmii.
 */
static const struct iface_ops iface_ops_qsgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_QSGMII,
	.enumerate = __cvmx_helper_sgmii_enumerate,
	.probe = __cvmx_helper_sgmii_probe,
	.enable = __cvmx_helper_sgmii_enable,
	.link_get = __cvmx_helper_sgmii_link_get,
	.link_set = __cvmx_helper_sgmii_link_set,
	.loopback = __cvmx_helper_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xaui using the gmx mac.
 */
static const struct iface_ops iface_ops_xaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
	.enumerate = __cvmx_helper_xaui_enumerate,
	.probe = __cvmx_helper_xaui_probe,
	.enable = __cvmx_helper_xaui_enable,
	.link_get = __cvmx_helper_xaui_link_get,
	.link_set = __cvmx_helper_xaui_link_set,
	.loopback = __cvmx_helper_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xaui using the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_xaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rxaui.
 */
static const struct iface_ops iface_ops_rxaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
	.enumerate = __cvmx_helper_xaui_enumerate,
	.probe = __cvmx_helper_xaui_probe,
	.enable = __cvmx_helper_xaui_enable,
	.link_get = __cvmx_helper_xaui_link_get,
	.link_set = __cvmx_helper_xaui_link_set,
	.loopback = __cvmx_helper_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rxaui using the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_rxaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xlaui.
 */
static const struct iface_ops iface_ops_bgx_xlaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XLAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xfi.
 */
static const struct iface_ops iface_ops_bgx_xfi = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XFI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

static const struct iface_ops iface_ops_bgx_10G_KR = {
	.mode = CVMX_HELPER_INTERFACE_MODE_10G_KR,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

static const struct iface_ops iface_ops_bgx_40G_KR4 = {
	.mode = CVMX_HELPER_INTERFACE_MODE_40G_KR4,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as ilk.
 */
static const struct iface_ops iface_ops_ilk = {
	.mode = CVMX_HELPER_INTERFACE_MODE_ILK,
	.enumerate = __cvmx_helper_ilk_enumerate,
	.probe = __cvmx_helper_ilk_probe,
	.enable = __cvmx_helper_ilk_enable,
	.link_get = __cvmx_helper_ilk_link_get,
	.link_set = __cvmx_helper_ilk_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as npi.
 */
static const struct iface_ops iface_ops_npi = {
	.mode = CVMX_HELPER_INTERFACE_MODE_NPI,
	.enumerate = __cvmx_helper_npi_probe,
	.probe = __cvmx_helper_npi_probe,
	.enable = __cvmx_helper_npi_enable,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as srio.
 */
static const struct iface_ops iface_ops_srio = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SRIO,
	.enumerate = __cvmx_helper_srio_probe,
	.probe = __cvmx_helper_srio_probe,
	.enable = __cvmx_helper_srio_enable,
	.link_get = __cvmx_helper_srio_link_get,
	.link_set = __cvmx_helper_srio_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as agl.
 */
static const struct iface_ops iface_ops_agl = {
	.mode = CVMX_HELPER_INTERFACE_MODE_AGL,
	.enumerate = __cvmx_helper_agl_enumerate,
	.probe = __cvmx_helper_agl_probe,
	.enable = __cvmx_helper_agl_enable,
	.link_get = __cvmx_helper_agl_link_get,
	.link_set = __cvmx_helper_agl_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as mixed mode, some ports are sgmii and some are xfi.
 */
static const struct iface_ops iface_ops_bgx_mixed = {
	.mode = CVMX_HELPER_INTERFACE_MODE_MIXED,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_mixed_enable,
	.link_get = __cvmx_helper_bgx_mixed_link_get,
	.link_set = __cvmx_helper_bgx_mixed_link_set,
	.loopback = __cvmx_helper_bgx_mixed_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as loop.
 */
static const struct iface_ops iface_ops_loop = {
	.mode = CVMX_HELPER_INTERFACE_MODE_LOOP,
	.enumerate = __cvmx_helper_loop_enumerate,
	.probe = __cvmx_helper_loop_probe,
};

const struct iface_ops *iface_node_ops[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];
#define iface_ops iface_node_ops[0]

struct cvmx_iface {
	int cvif_ipd_nports;
	int cvif_has_fcs; /* PKO fcs for this interface. */
	enum cvmx_pko_padding cvif_padding;
	cvmx_helper_link_info_t *cvif_ipd_port_link_info;
};

/*
 * This has to be static as u-boot expects to probe an interface and
 * get the number of its ports.
 */
static struct cvmx_iface cvmx_interfaces[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];

int __cvmx_helper_get_num_ipd_ports(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	return piface->cvif_ipd_nports;
}

enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return CVMX_PKO_PADDING_NONE;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	return piface->cvif_padding;
}

int __cvmx_helper_init_interface(int xiface, int num_ipd_ports, int has_fcs,
				 enum cvmx_pko_padding pad)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;
	cvmx_helper_link_info_t *p;
	int i;
	int sz;
	u64 addr;
	char name[32];

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	piface->cvif_ipd_nports = num_ipd_ports;
	piface->cvif_padding = pad;

	piface->cvif_has_fcs = has_fcs;

	/*
	 * allocate the per-ipd_port link_info structure
	 */
	sz = piface->cvif_ipd_nports * sizeof(cvmx_helper_link_info_t);
	snprintf(name, sizeof(name), "__int_%d_link_info", xi.interface);
	addr = CAST64(cvmx_bootmem_alloc_named_range_once(sz, 0, 0,
							  __alignof(cvmx_helper_link_info_t),
							  name, NULL));
	piface->cvif_ipd_port_link_info =
		(cvmx_helper_link_info_t *)__cvmx_phys_addr_to_ptr(addr, sz);
	if (!piface->cvif_ipd_port_link_info) {
		if (sz != 0)
			debug("iface %d failed to alloc link info\n", xi.interface);
		return -1;
	}

	/* Initialize them */
	p = piface->cvif_ipd_port_link_info;

	for (i = 0; i < piface->cvif_ipd_nports; i++) {
		(*p).u64 = 0;
		p++;
	}
	return 0;
}

/*
 * Shut down the interfaces; free the resources.
 * @INTERNAL
 */
void __cvmx_helper_shutdown_interfaces_node(unsigned int node)
{
	int i;
	int nifaces; /* number of interfaces */
	struct cvmx_iface *piface;

	nifaces = cvmx_helper_get_number_of_interfaces();
	for (i = 0; i < nifaces; i++) {
		piface = &cvmx_interfaces[node][i];

		/*
		 * For SE apps, bootmem was meant to be allocated and never
		 * freed.
		 */
		piface->cvif_ipd_port_link_info = 0;
	}
}

void __cvmx_helper_shutdown_interfaces(void)
{
	unsigned int node = cvmx_get_node_num();

	__cvmx_helper_shutdown_interfaces_node(node);
}

int __cvmx_helper_set_link_info(int xiface, int index, cvmx_helper_link_info_t link_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];

	if (piface->cvif_ipd_port_link_info) {
		piface->cvif_ipd_port_link_info[index] = link_info;
		return 0;
	}

	return -1;
}

cvmx_helper_link_info_t __cvmx_helper_get_link_info(int xiface, int port)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;
	cvmx_helper_link_info_t err;

	err.u64 = 0;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return err;
	piface = &cvmx_interfaces[xi.node][xi.interface];

	if (piface->cvif_ipd_port_link_info)
		return piface->cvif_ipd_port_link_info[port];

	return err;
}

/**
 * Returns whether FCS is enabled for the specified interface.
 *
 * @param xiface - interface to check
 *
 * @return Zero if FCS is not used, otherwise FCS is used.
 */
int __cvmx_helper_get_has_fcs(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	return cvmx_interfaces[xi.node][xi.interface].cvif_has_fcs;
}

u64 cvmx_rgmii_backpressure_dis = 1;

typedef int (*cvmx_export_config_t)(void);
cvmx_export_config_t cvmx_export_app_config;

void cvmx_rgmii_set_back_pressure(uint64_t backpressure_dis)
{
	cvmx_rgmii_backpressure_dis = backpressure_dis;
}

/*
 * internal functions that are not exported in the .h file but must be
 * declared to make gcc happy.
 */
extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port);

/**
 * cvmx_override_iface_phy_mode(int interface, int index) is a function pointer.
 * It is meant to allow customization of interfaces which do not have a PHY.
 *
 * @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides TX_CONFIG_REG.
 *
 * If this function pointer is NULL then it defaults to the MAC.
 */
int (*cvmx_override_iface_phy_mode)(int interface, int index);

/**
 * cvmx_override_ipd_port_setup(int ipd_port) is a function
 * pointer. It is meant to allow customization of the IPD
 * port/port kind setup before packet input/output comes online.
 * It is called after cvmx-helper does the default IPD configuration,
 * but before IPD is enabled. Users should set this pointer to a
 * function before calling any cvmx-helper operations.
 */
void (*cvmx_override_ipd_port_setup)(int ipd_port) = NULL;
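
/*
 * Illustrative sketch (not part of this driver): installing the two
 * override hooks above before any cvmx-helper call. my_phy_mode() and
 * my_port_setup() are hypothetical board-specific callbacks.
 *
 *	static int my_phy_mode(int interface, int index)
 *	{
 *		return 0;	// 0: MAC decides TX_CONFIG_REG
 *	}
 *
 *	static void my_port_setup(int ipd_port)
 *	{
 *		// adjust port/port-kind settings for ipd_port here
 *	}
 *
 *	cvmx_override_iface_phy_mode = my_phy_mode;
 *	cvmx_override_ipd_port_setup = my_port_setup;
 */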

/**
 * Return the number of interfaces the chip has. Each interface
 * may have multiple ports. Most chips support two interfaces,
 * but the CNX0XX and CNX1XX are exceptions. These only support
 * one interface.
 *
 * @return Number of interfaces on chip
 */
int cvmx_helper_get_number_of_interfaces(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return 9;
	else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
		if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
			return 7;
		else
			return 8;
	else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		return 6;
	else if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 10;
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 5;
	else
		return 3;
}

int __cvmx_helper_early_ports_on_interface(int interface)
{
	int ports;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_helper_interface_enumerate(interface);

	ports = cvmx_helper_interface_enumerate(interface);
	ports = __cvmx_helper_board_interface_probe(interface, ports);

	return ports;
}

/**
 * Return the number of ports on an interface. Depending on the
 * chip and configuration, this can be 1-16. A value of 0
 * specifies that the interface doesn't exist or isn't usable.
 *
 * @param xiface xiface to get the port count for
 *
 * @return Number of ports on interface. Can be zero.
 */
int cvmx_helper_ports_on_interface(int xiface)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_helper_interface_enumerate(xiface);
	else
		return __cvmx_helper_get_num_ipd_ports(xiface);
}
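
/*
 * Illustrative sketch (not part of this driver): walking every
 * interface and port on the local node with the helpers above.
 *
 *	int node = cvmx_get_node_num();
 *	int i, nifaces = cvmx_helper_get_number_of_interfaces();
 *
 *	for (i = 0; i < nifaces; i++) {
 *		int xiface = cvmx_helper_node_interface_to_xiface(node, i);
 *		int port, nports = cvmx_helper_ports_on_interface(xiface);
 *
 *		for (port = 0; port < nports; port++) {
 *			int ipd_port = cvmx_helper_get_ipd_port(xiface, port);
 *			// ... per-port work on ipd_port ...
 *		}
 *	}
 */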

/**
 * @INTERNAL
 * Return interface mode for CN70XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn70xx(int interface)
{
	/* SGMII/RXAUI/QSGMII */
	if (interface < 2) {
		enum cvmx_qlm_mode qlm_mode =
			cvmx_qlm_get_dlm_mode(0, interface);

		if (qlm_mode == CVMX_QLM_MODE_SGMII)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_mode == CVMX_QLM_MODE_QSGMII)
			iface_ops[interface] = &iface_ops_qsgmii;
		else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
			iface_ops[interface] = &iface_ops_rxaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (interface == 2) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 3) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else if (interface == 4) { /* RGMII (AGL) */
		cvmx_agl_prtx_ctl_t prtx_ctl;

		prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(0));
		if (prtx_ctl.s.mode == 0)
			iface_ops[interface] = &iface_ops_agl;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CN78XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn78xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	/* SGMII/RXAUI/XAUI */
	if (xi.interface < 6) {
		int qlm = cvmx_qlm_lmac(xiface, 0);
		enum cvmx_qlm_mode qlm_mode;

		if (qlm == -1) {
			iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
			return iface_node_ops[xi.node][xi.interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, qlm);

		if (qlm_mode == CVMX_QLM_MODE_SGMII)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_sgmii;
		else if (qlm_mode == CVMX_QLM_MODE_XAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xaui;
		else if (qlm_mode == CVMX_QLM_MODE_XLAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xlaui;
		else if (qlm_mode == CVMX_QLM_MODE_XFI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xfi;
		else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_rxaui;
		else
			iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	} else if (xi.interface < 8) {
		enum cvmx_qlm_mode qlm_mode;
		int found = 0;
		int i;
		int intf, lane_mask;

		if (xi.interface == 6) {
			intf = 6;
			lane_mask = cvmx_ilk_lane_mask[xi.node][0];
		} else {
			intf = 7;
			lane_mask = cvmx_ilk_lane_mask[xi.node][1];
		}
		switch (lane_mask) {
		default:
		case 0x0:
			iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 4);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xff:
			found = 0;
			for (i = 4; i < 6; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 2)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xfff:
			found = 0;
			for (i = 4; i < 7; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 3)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xff00:
			found = 0;
			for (i = 6; i < 8; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 2)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf0:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 5);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf00:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 6);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf000:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 7);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xfff0:
			found = 0;
			for (i = 5; i < 8; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 3)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		}
	} else if (xi.interface == 8) { /* DPI */
		int qlm = 0;

		for (qlm = 0; qlm < 5; qlm++) {
			/* if GSERX_CFG[pcie] == 1, then enable npi */
			if (csr_rd_node(xi.node, CVMX_GSERX_CFG(qlm)) & 0x1) {
				iface_node_ops[xi.node][xi.interface] =
					&iface_ops_npi;
				return iface_node_ops[xi.node][xi.interface]->mode;
			}
		}
		iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	} else if (xi.interface == 9) { /* LOOP */
		iface_node_ops[xi.node][xi.interface] = &iface_ops_loop;
	} else {
		iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	}

	return iface_node_ops[xi.node][xi.interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CN73XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn73xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	/* SGMII/XAUI/XLAUI/XFI */
	if (interface < 3) {
		int qlm = cvmx_qlm_lmac(xiface, 0);
		enum cvmx_qlm_mode qlm_mode;

		if (qlm == -1) {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode(qlm);

		switch (qlm_mode) {
		case CVMX_QLM_MODE_SGMII:
		case CVMX_QLM_MODE_SGMII_2X1:
		case CVMX_QLM_MODE_RGMII_SGMII:
		case CVMX_QLM_MODE_RGMII_SGMII_1X1:
			iface_ops[interface] = &iface_ops_bgx_sgmii;
			break;
		case CVMX_QLM_MODE_XAUI:
		case CVMX_QLM_MODE_RGMII_XAUI:
			iface_ops[interface] = &iface_ops_bgx_xaui;
			break;
		case CVMX_QLM_MODE_RXAUI:
		case CVMX_QLM_MODE_RXAUI_1X2:
		case CVMX_QLM_MODE_RGMII_RXAUI:
			iface_ops[interface] = &iface_ops_bgx_rxaui;
			break;
		case CVMX_QLM_MODE_XLAUI:
		case CVMX_QLM_MODE_RGMII_XLAUI:
			iface_ops[interface] = &iface_ops_bgx_xlaui;
			break;
		case CVMX_QLM_MODE_XFI:
		case CVMX_QLM_MODE_XFI_1X2:
		case CVMX_QLM_MODE_RGMII_XFI:
			iface_ops[interface] = &iface_ops_bgx_xfi;
			break;
		case CVMX_QLM_MODE_10G_KR:
		case CVMX_QLM_MODE_10G_KR_1X2:
		case CVMX_QLM_MODE_RGMII_10G_KR:
			iface_ops[interface] = &iface_ops_bgx_10G_KR;
			break;
		case CVMX_QLM_MODE_40G_KR4:
		case CVMX_QLM_MODE_RGMII_40G_KR4:
			iface_ops[interface] = &iface_ops_bgx_40G_KR4;
			break;
		case CVMX_QLM_MODE_MIXED:
			iface_ops[interface] = &iface_ops_bgx_mixed;
			break;
		default:
			iface_ops[interface] = &iface_ops_dis;
			break;
		}
	} else if (interface == 3) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 4) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CNF75XX.
 *
 * CNF75XX has a single BGX block, which is attached to two DLMs:
 * the first, GSER4, supports only SGMII mode, while the second,
 * GSER5, supports 1G/10G single-lane modes, i.e. SGMII, XFI and 10G-KR.
 * Each half-BGX is thus designated as a separate interface with two
 * ports.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cnf75xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	/* BGX0: SGMII (DLM4/DLM5)/XFI(DLM5)  */
	if (interface < 1) {
		enum cvmx_qlm_mode qlm_mode;
		int qlm = cvmx_qlm_lmac(xiface, 0);

		if (qlm == -1) {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode(qlm);

		switch (qlm_mode) {
		case CVMX_QLM_MODE_SGMII:
		case CVMX_QLM_MODE_SGMII_2X1:
			iface_ops[interface] = &iface_ops_bgx_sgmii;
			break;
		case CVMX_QLM_MODE_XFI_1X2:
			iface_ops[interface] = &iface_ops_bgx_xfi;
			break;
		case CVMX_QLM_MODE_10G_KR_1X2:
			iface_ops[interface] = &iface_ops_bgx_10G_KR;
			break;
		case CVMX_QLM_MODE_MIXED:
			iface_ops[interface] = &iface_ops_bgx_mixed;
			break;
		default:
			iface_ops[interface] = &iface_ops_dis;
			break;
		}
	} else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_sriox_status_reg_t sriox_status_reg;
		int srio_port = interface - 1;

		sriox_status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(srio_port));

		if (sriox_status_reg.s.srio)
			iface_ops[interface] = &iface_ops_srio;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (interface == 3) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 4) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CN68xx.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
{
	union cvmx_mio_qlmx_cfg qlm_cfg;

	switch (interface) {
	case 0:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 7)
			iface_ops[interface] = &iface_ops_rxaui;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 1:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 7)
			iface_ops[interface] = &iface_ops_rxaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 2:
	case 3:
	case 4:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 5:
	case 6:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface - 4));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 1)
			iface_ops[interface] = &iface_ops_ilk;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 7: {
		union cvmx_mio_qlmx_cfg qlm_cfg1;
		/* Check if PCIe0/PCIe1 is configured for PCIe */
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(3));
		qlm_cfg1.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
		/* QLM is disabled when QLM SPD is 15. */
		if ((qlm_cfg.s.qlm_spd != 15 && qlm_cfg.s.qlm_cfg == 0) ||
		    (qlm_cfg1.s.qlm_spd != 15 && qlm_cfg1.s.qlm_cfg == 0))
			iface_ops[interface] = &iface_ops_npi;
		else
			iface_ops[interface] = &iface_ops_dis;
	} break;

	case 8:
		iface_ops[interface] = &iface_ops_loop;
		break;

	default:
		iface_ops[interface] = &iface_ops_dis;
		break;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for an Octeon II
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
{
	union cvmx_gmxx_inf_mode mode;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return __cvmx_get_mode_cn68xx(interface);

	if (interface == 2) {
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 3) {
		iface_ops[interface] = &iface_ops_loop;
	} else if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
		    (interface == 4 || interface == 5)) ||
		   (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
		    interface <= 7)) {
		/* Only present in CN63XX & CN66XX Octeon model */
		union cvmx_sriox_status_reg sriox_status_reg;

		/* cn66xx pass1.0 has only 2 SRIO interfaces. */
		if ((interface == 5 || interface == 7) &&
		    OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0)) {
			iface_ops[interface] = &iface_ops_dis;
		} else if (interface == 5 && OCTEON_IS_MODEL(OCTEON_CN66XX)) {
			/*
			 * Later passes of cn66xx support SRIO0 - x4/x2/x1,
			 * SRIO2 - x2/x1, SRIO3 - x1
			 */
			iface_ops[interface] = &iface_ops_dis;
		} else {
			sriox_status_reg.u64 =
				csr_rd(CVMX_SRIOX_STATUS_REG(interface - 4));
			if (sriox_status_reg.s.srio)
				iface_ops[interface] = &iface_ops_srio;
			else
				iface_ops[interface] = &iface_ops_dis;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
		union cvmx_mio_qlmx_cfg mio_qlm_cfg;

		/* QLM2 is SGMII0 and QLM1 is SGMII1 */
		if (interface == 0) {
			mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
		} else if (interface == 1) {
			mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
		} else {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}

		if (mio_qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (mio_qlm_cfg.s.qlm_cfg == 9)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (mio_qlm_cfg.s.qlm_cfg == 11)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
		union cvmx_mio_qlmx_cfg qlm_cfg;

		if (interface == 0) {
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
		} else if (interface == 1) {
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		} else {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}

		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		if (interface == 0) {
			union cvmx_mio_qlmx_cfg qlm_cfg;

			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
			if (qlm_cfg.s.qlm_cfg == 2)
				iface_ops[interface] = &iface_ops_sgmii;
			else
				iface_ops[interface] = &iface_ops_dis;
		} else {
			iface_ops[interface] = &iface_ops_dis;
		}
	} else if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		iface_ops[interface] = &iface_ops_dis;
	} else {
		mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));

		if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
			switch (mode.cn63xx.mode) {
			case 0:
				iface_ops[interface] = &iface_ops_sgmii;
				break;

			case 1:
				iface_ops[interface] = &iface_ops_xaui;
				break;

			default:
				iface_ops[interface] = &iface_ops_dis;
				break;
			}
		} else {
			if (!mode.s.en)
				iface_ops[interface] = &iface_ops_dis;
			else if (mode.s.type)
				iface_ops[interface] = &iface_ops_gmii;
			else
				iface_ops[interface] = &iface_ops_rgmii;
		}
	}

	return iface_ops[interface]->mode;
}

/**
 * Get the operating mode of an interface. Depending on the Octeon
 * chip and configuration, this function returns an enumeration
 * of the type of packet I/O supported by an interface.
 *
 * @param xiface Interface to probe
 *
 * @return Mode of the interface. Unknown or unsupported interfaces return
 *         DISABLED.
 */
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (xi.interface < 0 ||
	    xi.interface >= cvmx_helper_get_number_of_interfaces())
		return CVMX_HELPER_INTERFACE_MODE_DISABLED;

	/*
	 * Check if the interface mode has been already cached. If it has,
	 * simply return it. Otherwise, fall through the rest of the code to
	 * determine the interface mode and cache it in iface_ops.
	 */
	if (iface_node_ops[xi.node][xi.interface]) {
		cvmx_helper_interface_mode_t mode;

		mode = iface_node_ops[xi.node][xi.interface]->mode;
		return mode;
	}

	/*
	 * OCTEON III models
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return __cvmx_get_mode_cn70xx(xi.interface);

	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return __cvmx_get_mode_cn78xx(xiface);

	if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_helper_interface_mode_t mode;

		mode = __cvmx_get_mode_cnf75xx(xiface);
		return mode;
	}

	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		cvmx_helper_interface_mode_t mode;

		mode = __cvmx_get_mode_cn73xx(xiface);
		return mode;
	}

	/*
	 * Octeon II models
	 */
	if (OCTEON_IS_OCTEON2())
		return __cvmx_get_mode_octeon2(xi.interface);

	/*
	 * Octeon and Octeon Plus models
	 */
	if (xi.interface == 2) {
		iface_ops[xi.interface] = &iface_ops_npi;
	} else if (xi.interface == 3) {
		iface_ops[xi.interface] = &iface_ops_dis;
	} else {
		union cvmx_gmxx_inf_mode mode;

		mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));

		if (!mode.s.en)
			iface_ops[xi.interface] = &iface_ops_dis;
		else if (mode.s.type)
			iface_ops[xi.interface] = &iface_ops_gmii;
		else
			iface_ops[xi.interface] = &iface_ops_rgmii;
	}

	return iface_ops[xi.interface]->mode;
}
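
/*
 * Illustrative sketch (not part of this driver): querying and printing
 * an interface mode, much as cvmx_helper_initialize_packet_io_node()
 * does further below.
 *
 *	cvmx_helper_interface_mode_t mode;
 *
 *	mode = cvmx_helper_interface_get_mode(xiface);
 *	if (mode != CVMX_HELPER_INTERFACE_MODE_DISABLED)
 *		printf("iface %d mode: %s\n", xiface,
 *		       cvmx_helper_interface_mode_to_string(mode));
 */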

/**
 * Determine the actual number of hardware ports connected to an
 * interface. It doesn't set up the ports or enable them.
 *
 * @param xiface Interface to enumerate
 *
 * @return The number of ports on the interface, negative on failure
 */
int cvmx_helper_interface_enumerate(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int result = 0;

	cvmx_helper_interface_get_mode(xiface);
	if (iface_node_ops[xi.node][xi.interface]->enumerate)
		result = iface_node_ops[xi.node][xi.interface]->enumerate(xiface);

	return result;
}

/**
 * This function probes an interface to determine the actual number of
 * hardware ports connected to it. It does some setup of the ports but
 * doesn't enable them. The main goal here is to set the global
 * interface_port_count[interface] correctly. Final hardware setup of
 * the ports will be performed later.
 *
 * @param xiface Interface to probe
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_interface_probe(int xiface)
{
	/*
	 * At this stage in the game we don't want packets to be
	 * moving yet.  The following probe calls should perform
	 * hardware setup needed to determine port counts. Receive
	 * must still be disabled.
	 */
	int nports;
	int has_fcs;
	enum cvmx_pko_padding padding = CVMX_PKO_PADDING_NONE;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	nports = -1;
	has_fcs = 0;

	cvmx_helper_interface_get_mode(xiface);
	if (iface_node_ops[xi.node][xi.interface]->probe)
		nports = iface_node_ops[xi.node][xi.interface]->probe(xiface);

	switch (iface_node_ops[xi.node][xi.interface]->mode) {
		/* These types don't support ports to IPD/PKO */
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		nports = 0;
		break;
		/* XAUI is a single high speed port */
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
	case CVMX_HELPER_INTERFACE_MODE_XFI:
	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
	case CVMX_HELPER_INTERFACE_MODE_MIXED:
		has_fcs = 1;
		padding = CVMX_PKO_PADDING_60;
		break;
		/*
		 * RGMII/GMII/MII are all treated about the same. Most
		 * functions refer to these ports as RGMII.
		 */
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		padding = CVMX_PKO_PADDING_60;
		break;
		/*
		 * SPI4 can have 1-16 ports depending on the device at
		 * the other end.
		 */
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		padding = CVMX_PKO_PADDING_60;
		break;
		/*
		 * SGMII can have 1-4 ports depending on how many are
		 * hooked up.
		 */
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		padding = CVMX_PKO_PADDING_60;
		/* Fall through: SGMII/QSGMII also use FCS */
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		has_fcs = 1;
		break;
		/* PCI target Network Packet Interface */
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		break;
		/*
		 * Special loopback only ports. These are not the same
		 * as other ports in loopback mode.
		 */
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		break;
		/* SRIO has 2^N ports, where N is number of interfaces */
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		break;
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		padding = CVMX_PKO_PADDING_60;
		has_fcs = 1;
		break;
	case CVMX_HELPER_INTERFACE_MODE_AGL:
		has_fcs = 1;
		break;
	}

	if (nports == -1)
		return -1;

	if (!octeon_has_feature(OCTEON_FEATURE_PKND))
		has_fcs = 0;

	nports = __cvmx_helper_board_interface_probe(xiface, nports);
	__cvmx_helper_init_interface(xiface, nports, has_fcs, padding);
	/* Make sure all global variables propagate to other cores */
	CVMX_SYNCWS;

	return 0;
}
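
/*
 * Illustrative sketch (not part of this driver): the usual probe flow.
 * Probing caches the port count, after which
 * cvmx_helper_ports_on_interface() returns a meaningful value.
 *
 *	if (cvmx_helper_interface_probe(xiface) == 0) {
 *		int nports = cvmx_helper_ports_on_interface(xiface);
 *		// nports is now valid; ports are set up but not enabled
 *	}
 */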

/**
 * @INTERNAL
 * Setup backpressure.
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_global_setup_backpressure(int node)
{
	cvmx_qos_proto_t qos_proto;
	cvmx_qos_pkt_mode_t qos_mode;
	int port, xipdport;
	unsigned int bpmask;
	int interface, xiface, ports;
	int num_interfaces = cvmx_helper_get_number_of_interfaces();

	if (cvmx_rgmii_backpressure_dis) {
		qos_proto = CVMX_QOS_PROTO_NONE;
		qos_mode = CVMX_QOS_PKT_MODE_DROP;
	} else {
		qos_proto = CVMX_QOS_PROTO_PAUSE;
		qos_mode = CVMX_QOS_PKT_MODE_HWONLY;
	}

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		ports = cvmx_helper_ports_on_interface(xiface);

		switch (cvmx_helper_interface_get_mode(xiface)) {
		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		case CVMX_HELPER_INTERFACE_MODE_PCIE:
		case CVMX_HELPER_INTERFACE_MODE_SRIO:
		case CVMX_HELPER_INTERFACE_MODE_ILK:
		case CVMX_HELPER_INTERFACE_MODE_NPI:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			break;
		case CVMX_HELPER_INTERFACE_MODE_LOOP:
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		case CVMX_HELPER_INTERFACE_MODE_XFI:
		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
			if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
				for (port = 0; port < ports; port++) {
					xipdport = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
				}
				cvmx_bgx_set_backpressure_override(xiface, bpmask);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_SPI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		case CVMX_HELPER_INTERFACE_MODE_MIXED:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
			if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
				for (port = 0; port < ports; port++) {
					xipdport = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
				}
				cvmx_bgx_set_backpressure_override(xiface, bpmask);
			} else {
				cvmx_gmx_set_backpressure_override(interface, bpmask);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_AGL:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0x1 : 0;
			cvmx_agl_set_backpressure_override(interface, bpmask);
			break;
		}
	}
	return 0;
}

/**
 * @INTERNAL
 * Verify the per port IPD backpressure is aligned properly.
 * @return Zero if working, non zero if misaligned
 */
int __cvmx_helper_backpressure_is_misaligned(void)
{
	return 0;
}

/**
 * @INTERNAL
 * Enable packet input/output from the hardware. This function is
 * called after all internal setup is complete and IPD is enabled.
 * After this function completes, packets will be accepted from the
 * hardware ports. PKO should still be disabled to make sure packets
 * aren't sent out of partially set-up hardware.
 *
 * @param xiface Interface to enable
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_packet_hardware_enable(int xiface)
{
	int result = 0;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (iface_node_ops[xi.node][xi.interface]->enable)
		result = iface_node_ops[xi.node][xi.interface]->enable(xiface);
	result |= __cvmx_helper_board_hardware_enable(xiface);
	return result;
}

int cvmx_helper_ipd_and_packet_input_enable(void)
{
	return cvmx_helper_ipd_and_packet_input_enable_node(cvmx_get_node_num());
}

/**
 * Called after all internal packet IO paths are setup. This
 * function enables IPD/PIP and begins packet input and output.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_ipd_and_packet_input_enable_node(int node)
{
	int num_interfaces;
	int interface;
	int num_ports;

	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		cvmx_helper_pki_enable(node);
	} else {
		/* Enable IPD */
		cvmx_ipd_enable();
	}

	/*
	 * Time to enable hardware ports packet input and output. Note
	 * that at this point IPD/PIP must be fully functional and PKO
	 * must be disabled.
	 */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		num_ports = cvmx_helper_ports_on_interface(xiface);
		if (num_ports > 0)
			__cvmx_helper_packet_hardware_enable(xiface);
	}

	/* Finally enable PKO now that the entire path is up and running */
	/* enable pko */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		; // cvmx_pko_enable_78xx(0); already enabled
	else
		cvmx_pko_enable();

	return 0;
}

/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_node(unsigned int node)
{
	int result = 0;
	int interface;
	int xiface;
	union cvmx_l2c_cfg l2c_cfg;
	union cvmx_smix_en smix_en;
	const int num_interfaces = cvmx_helper_get_number_of_interfaces();

	/*
	 * Tell L2 to give the IOB statically higher priority compared
	 * to the cores. This avoids conditions where IO blocks might
	 * be starved under very high L2 loads.
	 */
	if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		union cvmx_l2c_ctl l2c_ctl;

		l2c_ctl.u64 = csr_rd_node(node, CVMX_L2C_CTL);
		l2c_ctl.s.rsp_arb_mode = 1;
		l2c_ctl.s.xmc_arb_mode = 0;
		csr_wr_node(node, CVMX_L2C_CTL, l2c_ctl.u64);
	} else {
		l2c_cfg.u64 = csr_rd(CVMX_L2C_CFG);
		l2c_cfg.s.lrf_arb_mode = 0;
		l2c_cfg.s.rfb_arb_mode = 0;
		csr_wr(CVMX_L2C_CFG, l2c_cfg.u64);
	}

	int smi_inf;
	int i;

	/* Newer chips have more than one SMI/MDIO interface */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN78XX))
		smi_inf = 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))
		smi_inf = 2;
	else
		smi_inf = 2;

	for (i = 0; i < smi_inf; i++) {
		/* Make sure SMI/MDIO is enabled so we can query PHYs */
		smix_en.u64 = csr_rd_node(node, CVMX_SMIX_EN(i));
		if (!smix_en.s.en) {
			smix_en.s.en = 1;
			csr_wr_node(node, CVMX_SMIX_EN(i), smix_en.u64);
		}
	}

	/* TODO(vinita): check whether this needs to be modified for multi-node */
	__cvmx_helper_init_port_valid();

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		result |= cvmx_helper_interface_probe(xiface);
	}

	/* PKO3 init precedes that of interfaces */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		__cvmx_helper_init_port_config_data(node);
		result = cvmx_helper_pko3_init_global(node);
	} else {
		result = cvmx_helper_pko_init();
	}

	/* Errata SSO-29000, Disabling power saving SSO conditional clocking */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_sso_ws_cfg_t cfg;

		cfg.u64 = csr_rd_node(node, CVMX_SSO_WS_CFG);
		cfg.s.sso_cclk_dis = 1;
		csr_wr_node(node, CVMX_SSO_WS_CFG, cfg.u64);
	}

	if (result < 0)
		return result;

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		/* Skip invalid/disabled interfaces */
		if (cvmx_helper_ports_on_interface(xiface) <= 0)
			continue;
		printf("Node %d Interface %d has %d ports (%s)\n", node, interface,
		       cvmx_helper_ports_on_interface(xiface),
		       cvmx_helper_interface_mode_to_string(
			       cvmx_helper_interface_get_mode(xiface)));

		result |= __cvmx_helper_ipd_setup_interface(xiface);
		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
			result |= cvmx_helper_pko3_init_interface(xiface);
		else
			result |= __cvmx_helper_interface_setup_pko(interface);
	}

	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		result |= __cvmx_helper_pki_global_setup(node);
	else
		result |= __cvmx_helper_ipd_global_setup();

	/* Enable any flow control and backpressure */
	result |= __cvmx_helper_global_setup_backpressure(node);

	/* export app config if set */
	if (cvmx_export_app_config)
		result |= (*cvmx_export_app_config)();

	if (cvmx_ipd_cfg.ipd_enable && cvmx_pki_dflt_init[node])
		result |= cvmx_helper_ipd_and_packet_input_enable_node(node);
	return result;
}

/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_global(void)
{
	unsigned int node = cvmx_get_node_num();

	return cvmx_helper_initialize_packet_io_node(node);
}
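
/*
 * Illustrative sketch (not part of this driver): minimal packet-I/O
 * bring-up on the boot core, assuming FPA buffer pools have already
 * been populated.
 *
 *	if (cvmx_helper_initialize_packet_io_global() != 0)
 *		return -1;		// global IPD/PIP/PKO setup failed
 *	if (cvmx_helper_initialize_packet_io_local() != 0)
 *		return -1;		// per-core setup (PKO3 DQ tables)
 */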
1656 
1657 /**
1658  * Does core local initialization for packet io
1659  *
1660  * @return Zero on success, non-zero on failure
1661  */
cvmx_helper_initialize_packet_io_local(void)1662 int cvmx_helper_initialize_packet_io_local(void)
1663 {
1664 	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
1665 		__cvmx_pko3_dq_table_setup();
1666 
1667 	return 0;
1668 }

struct cvmx_buffer_list {
	struct cvmx_buffer_list *next;
};

/**
 * Disables the sending of flow control (pause) frames on the specified
 * GMX port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4 bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * @return 0 on success
 *         -1 on error
 */
int cvmx_gmx_set_backpressure_override(u32 interface, uint32_t port_mask)
{
	union cvmx_gmxx_tx_ovr_bp gmxx_tx_ovr_bp;
	/* Check for valid arguments */
	if (port_mask & ~0xf || interface & ~0x1)
		return -1;
	if (interface >= CVMX_HELPER_MAX_GMX)
		return -1;

	gmxx_tx_ovr_bp.u64 = 0;
	gmxx_tx_ovr_bp.s.en = port_mask;       /* Per port Enable back pressure override */
	gmxx_tx_ovr_bp.s.ign_full = port_mask; /* Ignore the RX FIFO full when computing BP */
	csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmxx_tx_ovr_bp.u64);
	return 0;
}

/**
 * Disables the sending of flow control (pause) frames on the specified
 * AGL (RGMII) port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4 bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * @return 0 on success
 *         -1 on error
 */
int cvmx_agl_set_backpressure_override(u32 interface, uint32_t port_mask)
{
	union cvmx_agl_gmx_tx_ovr_bp agl_gmx_tx_ovr_bp;
	int port = cvmx_helper_agl_get_port(interface);

	if (port == -1)
		return -1;
	/* Check for valid arguments */
	agl_gmx_tx_ovr_bp.u64 = 0;
	/* Per port Enable back pressure override */
	agl_gmx_tx_ovr_bp.s.en = port_mask;
	/* Ignore the RX FIFO full when computing BP */
	agl_gmx_tx_ovr_bp.s.ign_full = port_mask;
	csr_wr(CVMX_GMXX_TX_OVR_BP(port), agl_gmx_tx_ovr_bp.u64);
	return 0;
}
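
/*
 * Usage sketch (illustrative only): a set bit in the mask disables
 * pause-frame generation for that port. Here backpressure is disabled
 * on all four ports of GMX interface 0 and on the single AGL (RGMII)
 * management port of interface 0.
 */
static inline int example_disable_all_backpressure(void)
{
	int result = 0;

	result |= cvmx_gmx_set_backpressure_override(0, 0xf);
	result |= cvmx_agl_set_backpressure_override(0, 0x1);
	return result;
}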

/**
 * Helper function for global packet IO shutdown
 */
int cvmx_helper_shutdown_packet_io_global_cn78xx(int node)
{
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	cvmx_wqe_t *work;
	int interface;
	int result = 0;

	/* Shut down all interfaces and disable TX and RX on all ports */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		int index;
		int num_ports = cvmx_helper_ports_on_interface(xiface);

		if (num_ports > 4)
			num_ports = 4;

		cvmx_bgx_set_backpressure_override(xiface, 0);
		for (index = 0; index < num_ports; index++) {
			cvmx_helper_link_info_t link_info;

			if (!cvmx_helper_is_port_valid(xiface, index))
				continue;

			cvmx_helper_bgx_shutdown_port(xiface, index);

			/* Turn off link LEDs */
			link_info.u64 = 0;
			cvmx_helper_update_link_led(xiface, index, link_info);
		}
	}

	/* Stop input first */
	cvmx_helper_pki_shutdown(node);

	/* Retrieve all packets from the SSO and free them */
	result = 0;
	while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
		cvmx_helper_free_pki_pkt_data(work);
		cvmx_wqe_pki_free(work);
		result++;
	}

	if (result > 0)
		debug("%s: Purged %d packets from SSO\n", __func__, result);

	/*
	 * No need to wait for PKO queues to drain,
	 * dq_close() drains the queues to NULL.
	 */

	/* Shutdown PKO interfaces */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		cvmx_helper_pko3_shut_interface(xiface);
	}

	/* Disable MAC address filtering */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		switch (cvmx_helper_interface_get_mode(xiface)) {
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		case CVMX_HELPER_INTERFACE_MODE_XFI:
		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_MIXED: {
			int index;
			int num_ports = cvmx_helper_ports_on_interface(xiface);

			for (index = 0; index < num_ports; index++) {
				if (!cvmx_helper_is_port_valid(xiface, index))
					continue;

				/* Reset MAC filtering */
				cvmx_helper_bgx_rx_adr_ctl(node, interface, index, 0, 0, 0);
			}
			break;
		}
		default:
			break;
		}
	}

	for (interface = 0; interface < num_interfaces; interface++) {
		int index;
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		int num_ports = cvmx_helper_ports_on_interface(xiface);

		for (index = 0; index < num_ports; index++) {
			/* Doing this twice should clear it since no packets
			 * can be received.
			 */
			cvmx_update_rx_activity_led(xiface, index, false);
			cvmx_update_rx_activity_led(xiface, index, false);
		}
	}

	/* Shutdown the PKO unit */
	result = cvmx_helper_pko3_shutdown(node);

	/* Release interface structures */
	__cvmx_helper_shutdown_interfaces();

	return result;
}

/**
 * Undo the initialization performed in
 * cvmx_helper_initialize_packet_io_global(). After calling this routine
 * and the local version on each core, packet IO for Octeon will be
 * disabled and placed in the initial reset state. It will then be safe
 * to call the initialization routines again later on. Note that this
 * routine does not empty the FPA pools. It frees all buffers used by
 * the packet IO hardware to the FPA, so a function emptying the FPA
 * after shutdown should find all packet buffers in the FPA.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_helper_shutdown_packet_io_global(void)
{
	const int timeout = 5; /* Wait up to 5 seconds for timeouts */
	int result = 0;
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	int interface;
	int num_ports;
	int index;
	struct cvmx_buffer_list *pool0_buffers;
	struct cvmx_buffer_list *pool0_buffers_tail;
	cvmx_wqe_t *work;
	union cvmx_ipd_ctl_status ipd_ctl_status;
	int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
	int node = cvmx_get_node_num();
	cvmx_pcsx_mrx_control_reg_t control_reg;

	if (octeon_has_feature(OCTEON_FEATURE_BGX))
		return cvmx_helper_shutdown_packet_io_global_cn78xx(node);

	/* Step 1: Disable all backpressure */
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t mode =
			cvmx_helper_interface_get_mode(interface);

		if (mode == CVMX_HELPER_INTERFACE_MODE_AGL)
			cvmx_agl_set_backpressure_override(interface, 0x1);
		else if (mode != CVMX_HELPER_INTERFACE_MODE_DISABLED)
			cvmx_gmx_set_backpressure_override(interface, 0xf);
	}

	/* Step 2: Wait for the PKO queues to drain */
	result = __cvmx_helper_pko_drain();
	if (result < 0) {
		debug("WARNING: %s: Failed to drain some PKO queues\n",
		      __func__);
	}

	/* Step 3: Disable TX and RX on all ports */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node,
								  interface);

		switch (cvmx_helper_interface_get_mode(interface)) {
		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		case CVMX_HELPER_INTERFACE_MODE_PCIE:
			/* Not a packet interface */
			break;
		case CVMX_HELPER_INTERFACE_MODE_NPI:
		case CVMX_HELPER_INTERFACE_MODE_SRIO:
		case CVMX_HELPER_INTERFACE_MODE_ILK:
			/*
			 * We don't handle the NPI/NPEI/SRIO packet
			 * engines. The caller must know these are
			 * idle.
			 */
			break;
		case CVMX_HELPER_INTERFACE_MODE_LOOP:
			/*
			 * Nothing needed. Once PKO is idle, the
			 * loopback devices must be idle.
			 */
			break;
		case CVMX_HELPER_INTERFACE_MODE_SPI:
			/*
			 * SPI cannot be disabled from Octeon. It is
			 * the responsibility of the caller to make
			 * sure SPI is idle before doing shutdown.
			 *
			 * Fall through and do the same processing as
			 * RGMII/GMII.
			 */
			fallthrough;
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
			/* Disable outermost RX at the ASX block */
			csr_wr(CVMX_ASXX_RX_PRT_EN(interface), 0);
			num_ports = cvmx_helper_ports_on_interface(xiface);
			if (num_ports > 4)
				num_ports = 4;
			for (index = 0; index < num_ports; index++) {
				union cvmx_gmxx_prtx_cfg gmx_cfg;

				if (!cvmx_helper_is_port_valid(interface, index))
					continue;
				gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
				gmx_cfg.s.en = 0;
				csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
				/* Poll the GMX state machine waiting for it to become idle */
				csr_wr(CVMX_NPI_DBG_SELECT,
				       interface * 0x800 + index * 0x100 + 0x880);
				if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
							  data & 7, ==, 0, timeout * 1000000)) {
					debug("GMX RX path timeout waiting for idle\n");
					result = -1;
				}
				if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
							  data & 0xf, ==, 0, timeout * 1000000)) {
					debug("GMX TX path timeout waiting for idle\n");
					result = -1;
				}
			}
			/* Disable outermost TX at the ASX block */
			csr_wr(CVMX_ASXX_TX_PRT_EN(interface), 0);
			/* Disable interrupts for interface */
			csr_wr(CVMX_ASXX_INT_EN(interface), 0);
			csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0);
			break;
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			num_ports = cvmx_helper_ports_on_interface(xiface);
			if (num_ports > 4)
				num_ports = 4;
			for (index = 0; index < num_ports; index++) {
				union cvmx_gmxx_prtx_cfg gmx_cfg;

				if (!cvmx_helper_is_port_valid(interface, index))
					continue;
				gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
				gmx_cfg.s.en = 0;
				csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
				if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
							  union cvmx_gmxx_prtx_cfg, rx_idle, ==, 1,
							  timeout * 1000000)) {
					debug("GMX RX path timeout waiting for idle\n");
					result = -1;
				}
				if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
							  union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
							  timeout * 1000000)) {
					debug("GMX TX path timeout waiting for idle\n");
					result = -1;
				}
				/* For SGMII some PHYs require that the PCS
				 * interface be powered down and reset (i.e.
				 * Atheros/Qualcomm PHYs).
				 */
				if (cvmx_helper_interface_get_mode(interface) ==
				    CVMX_HELPER_INTERFACE_MODE_SGMII) {
					u64 reg;

					reg = CVMX_PCSX_MRX_CONTROL_REG(index, interface);
					/* Power down the interface */
					control_reg.u64 = csr_rd(reg);
					control_reg.s.pwr_dn = 1;
					csr_wr(reg, control_reg.u64);
					csr_rd(reg);
				}
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_AGL: {
			int port = cvmx_helper_agl_get_port(interface);
			union cvmx_agl_gmx_prtx_cfg agl_gmx_cfg;

			agl_gmx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
			agl_gmx_cfg.s.en = 0;
			csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_cfg.u64);
			if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
						  union cvmx_agl_gmx_prtx_cfg, rx_idle, ==, 1,
						  timeout * 1000000)) {
				debug("AGL RX path timeout waiting for idle\n");
				result = -1;
			}
			if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
						  union cvmx_agl_gmx_prtx_cfg, tx_idle, ==, 1,
						  timeout * 1000000)) {
				debug("AGL TX path timeout waiting for idle\n");
				result = -1;
			}
		} break;
		default:
			break;
		}
	}

	/* Step 4: Retrieve all packets from the POW and free them */
	while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
		cvmx_helper_free_packet_data(work);
		cvmx_fpa1_free(work, wqe_pool, 0);
	}

	/* Step 5 */
	cvmx_ipd_disable();

	/*
	 * Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
	 * have not been reset yet
	 */
	__cvmx_ipd_free_ptr();

	/* Step 7: Free the PKO command buffers and put PKO in reset */
	cvmx_pko_shutdown();

	/* Step 8: Disable MAC address filtering */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		switch (cvmx_helper_interface_get_mode(interface)) {
		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		case CVMX_HELPER_INTERFACE_MODE_PCIE:
		case CVMX_HELPER_INTERFACE_MODE_SRIO:
		case CVMX_HELPER_INTERFACE_MODE_ILK:
		case CVMX_HELPER_INTERFACE_MODE_NPI:
		case CVMX_HELPER_INTERFACE_MODE_LOOP:
			break;
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
		case CVMX_HELPER_INTERFACE_MODE_SPI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			num_ports = cvmx_helper_ports_on_interface(xiface);
			if (num_ports > 4)
				num_ports = 4;
			for (index = 0; index < num_ports; index++) {
				if (!cvmx_helper_is_port_valid(interface, index))
					continue;
				csr_wr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_AGL: {
			int port = cvmx_helper_agl_get_port(interface);

			csr_wr(CVMX_AGL_GMX_RXX_ADR_CTL(port), 1);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), 0);
		} break;
		default:
			break;
		}
	}

	/*
	 * Step 9: Drain all FPA buffers out of pool 0 before we reset
	 * IPD/PIP.  This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
	 * sync. We temporarily keep the buffers in the pool0_buffers
	 * list.
	 */
	pool0_buffers = NULL;
	pool0_buffers_tail = NULL;
	while (1) {
		struct cvmx_buffer_list *buffer = cvmx_fpa1_alloc(0);

		if (buffer) {
			buffer->next = NULL;

			if (!pool0_buffers)
				pool0_buffers = buffer;
			else
				pool0_buffers_tail->next = buffer;

			pool0_buffers_tail = buffer;
		} else {
			break;
		}
	}

	/* Step 10: Reset IPD and PIP */
	ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
	ipd_ctl_status.s.reset = 1;
	csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);

	/* Make sure IPD has finished reset. */
	if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, union cvmx_ipd_ctl_status, rst_done,
					  ==, 0, 1000)) {
			debug("IPD reset timeout waiting for idle\n");
			result = -1;
		}
	}

	/* Step 11: Restore the FPA buffers into pool 0 */
	while (pool0_buffers) {
		struct cvmx_buffer_list *n = pool0_buffers->next;

		cvmx_fpa1_free(pool0_buffers, 0, 0);
		pool0_buffers = n;
	}

	/* Step 12: Release interface structures */
	__cvmx_helper_shutdown_interfaces();

	return result;
}

/**
 * Does core local shutdown of packet io
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_shutdown_packet_io_local(void)
{
	/*
	 * Currently there is nothing to do per core. This may change
	 * in the future.
	 */
	return 0;
}
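
/*
 * Usage sketch (illustrative only): teardown mirrors bring-up in
 * reverse order. Each core runs the local shutdown first; the global
 * shutdown then runs once per node.
 */
static inline int example_packet_io_teardown(void)
{
	int result;

	/* On every core first */
	result = cvmx_helper_shutdown_packet_io_local();
	if (result)
		return result;

	/* Then once per node */
	return cvmx_helper_shutdown_packet_io_global();
}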

/**
 * Auto configure an IPD/PKO port link state and speed. This
 * function basically does the equivalent of:
 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
 *
 * @param xipd_port IPD/PKO port to auto configure
 *
 * @return Link state after configure
 */
cvmx_helper_link_info_t cvmx_helper_link_autoconf(int xipd_port)
{
	cvmx_helper_link_info_t link_info;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	int index = cvmx_helper_get_interface_index_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	if (interface == -1 || index == -1 || index >= cvmx_helper_ports_on_interface(xiface)) {
		link_info.u64 = 0;
		return link_info;
	}

	link_info = cvmx_helper_link_get(xipd_port);
	if (link_info.u64 == (__cvmx_helper_get_link_info(xiface, index)).u64)
		return link_info;

	if (!link_info.s.link_up)
		cvmx_error_disable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);

	/* If we fail to set the link speed, port_link_info will not change */
	cvmx_helper_link_set(xipd_port, link_info);

	if (link_info.s.link_up)
		cvmx_error_enable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);

	return link_info;
}
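
/*
 * Usage sketch (illustrative only): callers typically poll
 * cvmx_helper_link_autoconf() periodically for every valid port so the
 * MAC configuration tracks whatever the PHY auto-negotiated.
 */
static inline void example_poll_interface_links(int xiface)
{
	int index;
	int num_ports = cvmx_helper_ports_on_interface(xiface);

	for (index = 0; index < num_ports; index++) {
		cvmx_helper_link_info_t link;

		if (!cvmx_helper_is_port_valid(xiface, index))
			continue;
		link = cvmx_helper_link_autoconf(
			cvmx_helper_get_ipd_port(xiface, index));
		if (link.s.link_up)
			debug("xiface %d port %d up at %d Mbps\n", xiface,
			      index, (int)link.s.speed);
	}
}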

/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param xipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t cvmx_helper_link_get(int xipd_port)
{
	cvmx_helper_link_info_t result;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	int index = cvmx_helper_get_interface_index_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_fdt_sfp_info *sfp_info;

	/*
	 * The default result will be a down link unless the code
	 * below changes it.
	 */
	result.u64 = 0;

	if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
	    index >= cvmx_helper_ports_on_interface(xiface)) {
		return result;
	}

	if (iface_node_ops[xi.node][xi.interface]->link_get)
		result = iface_node_ops[xi.node][xi.interface]->link_get(xipd_port);

	if (xipd_port >= 0) {
		cvmx_helper_update_link_led(xiface, index, result);

		sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);

		while (sfp_info) {
			if (!result.s.link_up || sfp_info->last_mod_abs)
				cvmx_sfp_check_mod_abs(sfp_info, sfp_info->mod_abs_data);
			sfp_info = sfp_info->next_iface_sfp;
		}
	}

	return result;
}

/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param xipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_link_set(int xipd_port, cvmx_helper_link_info_t link_info)
{
	int result = -1;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index = cvmx_helper_get_interface_index_num(xipd_port);

	if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
	    index >= cvmx_helper_ports_on_interface(xiface))
		return -1;

	if (iface_node_ops[xi.node][xi.interface]->link_set)
		result = iface_node_ops[xi.node][xi.interface]->link_set(xipd_port, link_info);

	/*
	 * Set the port_link_info here so that the link status is
	 * updated no matter how cvmx_helper_link_set is called. We
	 * don't change the value if link_set failed.
	 */
	if (result == 0)
		__cvmx_helper_set_link_info(xiface, index, link_info);
	return result;
}

/**
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
 *
 * @param xipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                 Non zero if you want internal loopback
 * @param enable_external
 *                 Non zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_helper_configure_loopback(int xipd_port, int enable_internal, int enable_external)
{
	int result = -1;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index = cvmx_helper_get_interface_index_num(xipd_port);

	if (index >= cvmx_helper_ports_on_interface(xiface))
		return -1;

	cvmx_helper_interface_get_mode(xiface);
	if (iface_node_ops[xi.node][xi.interface]->loopback)
		result = iface_node_ops[xi.node][xi.interface]->loopback(xipd_port, enable_internal,
									 enable_external);

	return result;
}
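
/*
 * Usage sketch (illustrative only): put a port into internal loopback
 * for a self-test, so everything PKO transmits on the port comes back
 * through the input path, then restore normal operation.
 */
static inline int example_loopback_selftest(int xipd_port)
{
	int result;

	/* Internal loopback on, external off */
	result = cvmx_helper_configure_loopback(xipd_port, 1, 0);
	if (result)
		return result;

	/* ... send and verify test traffic here ... */

	/* Back to normal operation */
	return cvmx_helper_configure_loopback(xipd_port, 0, 0);
}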

void cvmx_helper_setup_simulator_io_buffer_counts(int node, int num_packet_buffers, int pko_buffers)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		cvmx_helper_pki_set_dflt_pool_buffer(node, num_packet_buffers);
		cvmx_helper_pki_set_dflt_aura_buffer(node, num_packet_buffers);
	} else {
		cvmx_ipd_set_packet_pool_buffer_count(num_packet_buffers);
		cvmx_ipd_set_wqe_pool_buffer_count(num_packet_buffers);
		cvmx_pko_set_cmd_queue_pool_buffer_count(pko_buffers);
	}
}

void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
{
	s64 paddr;

	paddr = cvmx_bootmem_phy_alloc_range(alloc_size, align, cvmx_addr_on_node(node, 0ull),
					     cvmx_addr_on_node(node, 0xffffffffff));
	if (paddr <= 0ll) {
		printf("ERROR: %s failed size %u\n", __func__, (unsigned int)alloc_size);
		return NULL;
	}
	return cvmx_phys_to_ptr(paddr);
}

void cvmx_helper_mem_free(void *buffer, uint64_t size)
{
	__cvmx_bootmem_phy_free(cvmx_ptr_to_phys(buffer), size, 0);
}
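
/*
 * Usage sketch (illustrative only): the two helpers above form a
 * matched pair over the bootmem allocator, and the caller must track
 * the size for the free. The 128-byte alignment is just an example
 * value matching a cache line.
 */
static inline int example_scratch_buffer(int node)
{
	const u64 size = 4096;
	void *buf = cvmx_helper_mem_alloc(node, size, 128);

	if (!buf)
		return -1;
	memset(buf, 0, size);
	/* ... use the buffer ... */
	cvmx_helper_mem_free(buf, size);
	return 0;
}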

int cvmx_helper_qos_config_init(cvmx_qos_proto_t qos_proto, cvmx_qos_config_t *qos_cfg)
{
	int i;

	memset(qos_cfg, 0, sizeof(cvmx_qos_config_t));
	qos_cfg->pkt_mode = CVMX_QOS_PKT_MODE_HWONLY; /* Process PAUSEs in hardware only.*/
	qos_cfg->pool_mode = CVMX_QOS_POOL_PER_PORT;  /* One Pool per BGX:LMAC.*/
	qos_cfg->pktbuf_size = 2048;		      /* Fit WQE + MTU in one buffer.*/
	qos_cfg->aura_size = 1024;	/* 1K buffers typically enough for any application.*/
	qos_cfg->pko_pfc_en = 1;	/* Enable PKO layout for PFC feature. */
	qos_cfg->vlan_num = 1;		/* For Stacked VLAN, use 2nd VLAN in the QPG algorithm.*/
	qos_cfg->qos_proto = qos_proto; /* Use PFC flow-control protocol.*/
	qos_cfg->qpg_base = -1;		/* QPG Table index is undefined.*/
	qos_cfg->p_time = 0x60;		/* PAUSE packets time window.*/
	qos_cfg->p_interval = 0x10;	/* PAUSE packets interval.*/
	for (i = 0; i < CVMX_QOS_NUM; i++) {
		qos_cfg->groups[i] = i;	      /* SSO Groups = 0...7 */
		qos_cfg->group_prio[i] = i;   /* SSO Group priority = QOS. */
		qos_cfg->drop_thresh[i] = 99; /* 99% of the Aura size.*/
		qos_cfg->red_thresh[i] = 90;  /* 90% of the Aura size.*/
		qos_cfg->bp_thresh[i] = 70;   /* 70% of the Aura size.*/
	}
	return 0;
}

int cvmx_helper_qos_port_config_update(int xipdport, cvmx_qos_config_t *qos_cfg)
{
	cvmx_user_static_pko_queue_config_t pkocfg;
	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
	int xiface = cvmx_helper_get_interface_num(xipdport);
	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);

	/* Configure PKO port for PFC SQ layout: */
	cvmx_helper_pko_queue_config_get(xp.node, &pkocfg);
	pkocfg.pknd.pko_cfg_iface[xi.interface].pfc_enable = 1;
	cvmx_helper_pko_queue_config_set(xp.node, &pkocfg);
	return 0;
}

int cvmx_helper_qos_port_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
{
	const int channels = CVMX_QOS_NUM;
	int bufsize = qos_cfg->pktbuf_size;
	int aura_size = qos_cfg->aura_size;
	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
	int node = xp.node;
	int ipdport = xp.port;
	int port = cvmx_helper_get_interface_index_num(xp.port);
	int xiface = cvmx_helper_get_interface_num(xipdport);
	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
	cvmx_fpa3_pool_t gpool;
	cvmx_fpa3_gaura_t gaura;
	cvmx_bgxx_cmr_rx_ovr_bp_t ovrbp;
	struct cvmx_pki_qpg_config qpgcfg;
	struct cvmx_pki_style_config stcfg, stcfg_dflt;
	struct cvmx_pki_pkind_config pkcfg;
	int chan, bpid, group, qpg;
	int bpen, reden, dropen, passthr, dropthr, bpthr;
	int nbufs, pkind, style;
	char name[32];

	if (qos_cfg->pool_mode == CVMX_QOS_POOL_PER_PORT) {
		/* Allocate and setup packet Pool: */
		nbufs = aura_size * channels;
		sprintf(name, "QOS.P%d", ipdport);
		gpool = cvmx_fpa3_setup_fill_pool(node, -1 /*auto*/, name, bufsize, nbufs, NULL);
		if (!__cvmx_fpa3_pool_valid(gpool)) {
			printf("%s: Failed to setup FPA Pool\n", __func__);
			return -1;
		}
		for (chan = 0; chan < channels; chan++)
			qos_cfg->gpools[chan] = gpool;
	} else {
		printf("%s: Invalid pool_mode %d\n", __func__, qos_cfg->pool_mode);
		return -1;
	}
	/* Allocate QPG entries: */
	qos_cfg->qpg_base = cvmx_pki_qpg_entry_alloc(node, -1 /*auto*/, channels);
	if (qos_cfg->qpg_base < 0) {
		printf("%s: Failed to allocate QPG entry\n", __func__);
		return -1;
	}
	for (chan = 0; chan < channels; chan++) {
		/* Allocate and setup Aura, setup BP threshold: */
		gpool = qos_cfg->gpools[chan];
		sprintf(name, "QOS.A%d", ipdport + chan);
		gaura = cvmx_fpa3_set_aura_for_pool(gpool, -1 /*auto*/, name, bufsize, aura_size);
		if (!__cvmx_fpa3_aura_valid(gaura)) {
			printf("%s: Failed to setup FPA Aura for Channel %d\n", __func__, chan);
			return -1;
		}
		qos_cfg->gauras[chan] = gaura;
		bpen = 1;
		reden = 1;
		dropen = 1;
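		/*
		 * The *_thresh values are percentages of the Aura size;
		 * x% of aura_size is computed as x * 10 * aura_size / 1000.
		 */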
		dropthr = (qos_cfg->drop_thresh[chan] * 10 * aura_size) / 1000;
		passthr = (qos_cfg->red_thresh[chan] * 10 * aura_size) / 1000;
		bpthr = (qos_cfg->bp_thresh[chan] * 10 * aura_size) / 1000;
		cvmx_fpa3_setup_aura_qos(gaura, reden, passthr, dropthr, bpen, bpthr);
		cvmx_pki_enable_aura_qos(node, gaura.laura, reden, dropen, bpen);

		/* Allocate BPID, link Aura and Channel using BPID: */
		bpid = cvmx_pki_bpid_alloc(node, -1 /*auto*/);
		if (bpid < 0) {
			printf("%s: Failed to allocate BPID for channel %d\n",
			       __func__, chan);
			return -1;
		}
		qos_cfg->bpids[chan] = bpid;
		cvmx_pki_write_aura_bpid(node, gaura.laura, bpid);
		cvmx_pki_write_channel_bpid(node, ipdport + chan, bpid);

		/* Setup QPG entries: */
		group = qos_cfg->groups[chan];
		qpg = qos_cfg->qpg_base + chan;
		cvmx_pki_read_qpg_entry(node, qpg, &qpgcfg);
		qpgcfg.port_add = chan;
		qpgcfg.aura_num = gaura.laura;
		qpgcfg.grp_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
		qpgcfg.grp_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
		qpgcfg.grptag_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
		qpgcfg.grptag_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
		cvmx_pki_write_qpg_entry(node, qpg, &qpgcfg);
	}
	/* Allocate and setup STYLE: */
	cvmx_helper_pki_get_dflt_style(node, &stcfg_dflt);
	style = cvmx_pki_style_alloc(node, -1 /*auto*/);
	cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
	stcfg.tag_cfg = stcfg_dflt.tag_cfg;
	stcfg.parm_cfg.tag_type = CVMX_POW_TAG_TYPE_ORDERED;
	stcfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;
	stcfg.parm_cfg.qpg_base = qos_cfg->qpg_base;
	stcfg.parm_cfg.qpg_port_msb = 0;
	stcfg.parm_cfg.qpg_port_sh = 0;
	stcfg.parm_cfg.qpg_dis_grptag = 1;
	stcfg.parm_cfg.fcs_strip = 1;
	stcfg.parm_cfg.mbuff_size = bufsize - 64; /* Do not use 100% of the buffer. */
	stcfg.parm_cfg.force_drop = 0;
	stcfg.parm_cfg.nodrop = 0;
	stcfg.parm_cfg.rawdrp = 0;
	stcfg.parm_cfg.cache_mode = 2; /* 1st buffer in L2 */
	stcfg.parm_cfg.wqe_vs = qos_cfg->vlan_num;
	cvmx_pki_write_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);

	/* Setup PKIND: */
	pkind = cvmx_helper_get_pknd(xiface, port);
	cvmx_pki_read_pkind_config(node, pkind, &pkcfg);
	pkcfg.cluster_grp = 0; /* OCTEON3 has only one cluster group = 0 */
	pkcfg.initial_style = style;
	pkcfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;
	cvmx_pki_write_pkind_config(node, pkind, &pkcfg);

	/* Setup parameters of the QOS packet and enable QOS flow-control: */
	cvmx_bgx_set_pause_pkt_param(xipdport, 0, 0x0180c2000001, 0x8808, qos_cfg->p_time,
				     qos_cfg->p_interval);
	cvmx_bgx_set_flowctl_mode(xipdport, qos_cfg->qos_proto, qos_cfg->pkt_mode);

	/* Enable PKI channel backpressure in the BGX: */
	ovrbp.u64 = csr_rd_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface));
	ovrbp.s.en &= ~(1 << port);
	ovrbp.s.ign_fifo_bp &= ~(1 << port);
	csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), ovrbp.u64);
	return 0;
}

int cvmx_helper_qos_sso_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
{
	const int channels = CVMX_QOS_NUM;
	cvmx_sso_grpx_pri_t grppri;
	int chan, qos, group;
	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
	int node = xp.node;

	for (chan = 0; chan < channels; chan++) {
		qos = cvmx_helper_qos2prio(chan);
		group = qos_cfg->groups[qos];
		grppri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
		grppri.s.pri = qos_cfg->group_prio[chan];
		csr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grppri.u64);
	}
	return 0;
}
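
/*
 * Usage sketch (illustrative only) of the intended call order for the
 * QOS helpers above. CVMX_QOS_PROTO_PFC is assumed here to be the PFC
 * enumerator of cvmx_qos_proto_t.
 */
static inline int example_enable_pfc(int xipdport)
{
	cvmx_qos_config_t qos_cfg;
	int result;

	/* Build the default PFC configuration ... */
	result = cvmx_helper_qos_config_init(CVMX_QOS_PROTO_PFC, &qos_cfg);
	/* ... adjust the PKO layout, then set up the port and the SSO */
	result |= cvmx_helper_qos_port_config_update(xipdport, &qos_cfg);
	result |= cvmx_helper_qos_port_setup(xipdport, &qos_cfg);
	result |= cvmx_helper_qos_sso_setup(xipdport, &qos_cfg);
	return result;
}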

int cvmx_helper_get_chan_e_name(int chan, char *namebuf, int buflen)
{
	int n, dpichans;

	if ((unsigned int)chan >= CVMX_PKO3_IPD_NUM_MAX) {
		printf("%s: Channel %d is out of range (0..4095)\n", __func__, chan);
		return -1;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		dpichans = 64;
	else
		dpichans = 128;

	if (chan >= 0 && chan < 64)
		n = snprintf(namebuf, buflen, "LBK%d", chan);
	else if (chan >= 0x100 && chan < (0x100 + dpichans))
		n = snprintf(namebuf, buflen, "DPI%d", chan - 0x100);
	else if (chan == 0x200)
		n = snprintf(namebuf, buflen, "NQM");
	else if (chan >= 0x240 && chan < (0x240 + (1 << 1) + 2))
		n = snprintf(namebuf, buflen, "SRIO%d:%d", (chan - 0x240) >> 1,
			     (chan - 0x240) & 0x1);
	else if (chan >= 0x400 && chan < (0x400 + (1 << 8) + 256))
		n = snprintf(namebuf, buflen, "ILK%d:%d", (chan - 0x400) >> 8,
			     (chan - 0x400) & 0xFF);
	else if (chan >= 0x800 && chan < (0x800 + (5 << 8) + (3 << 4) + 16))
		n = snprintf(namebuf, buflen, "BGX%d:%d:%d", (chan - 0x800) >> 8,
			     ((chan - 0x800) >> 4) & 0x3, (chan - 0x800) & 0xF);
	else
		n = snprintf(namebuf, buflen, "--");
	return n;
}
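
/*
 * Usage sketch (illustrative only): decode a PKO channel number into
 * its CHAN_E name for a diagnostic print; e.g. channel 0x800 decodes
 * to "BGX0:0:0".
 */
static inline void example_print_chan_name(int chan)
{
	char name[16];

	if (cvmx_helper_get_chan_e_name(chan, name, sizeof(name)) > 0)
		printf("channel 0x%x is %s\n", chan, name);
}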

#ifdef CVMX_DUMP_DIAGNOSTICS
void cvmx_helper_dump_for_diagnostics(int node)
{
	if (!(OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))) {
		printf("Diagnostics are not implemented for this model\n");
		return;
	}
#ifdef CVMX_DUMP_GSER
	{
		int qlm, num_qlms;

		num_qlms = cvmx_qlm_get_num();
		for (qlm = 0; qlm < num_qlms; qlm++) {
			cvmx_dump_gser_config_node(node, qlm);
			cvmx_dump_gser_status_node(node, qlm);
		}
	}
#endif
#ifdef CVMX_DUMP_BGX
	{
		int bgx;

		for (bgx = 0; bgx < CVMX_HELPER_MAX_GMX; bgx++) {
			cvmx_dump_bgx_config_node(node, bgx);
			cvmx_dump_bgx_status_node(node, bgx);
		}
	}
#endif
#ifdef CVMX_DUMP_PKI
	cvmx_pki_config_dump(node);
	cvmx_pki_stats_dump(node);
#endif
#ifdef CVMX_DUMP_PKO
	cvmx_helper_pko3_config_dump(node);
	cvmx_helper_pko3_stats_dump(node);
#endif
#ifdef CVMX_DUMP_SSO
	cvmx_sso_config_dump(node);
#endif
}
#endif