/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"

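/* Each profile provides an array of mlx5e_stats_grp descriptors. The
 * dispatch helpers below iterate that array, so ethtool and ndo callers
 * see one flat list of counters regardless of which groups the profile
 * registered.
 */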
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}

/* Concrete NIC Stats */

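/* Software counters, accumulated in struct mlx5e_sw_stats.
 * MLX5E_DECLARE_STAT derives both the ethtool string and the field
 * offset from the struct member name, so the order of this array
 * defines the ethtool output order.
 */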
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

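/* Helpers that fold one ring's counters into the aggregated software
 * stats. Each is called once per channel (and per TC for regular SQs).
 */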
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops  += xdpsq_red_stats->nops;
	s->tx_xdp_full  += xdpsq_red_stats->full;
	s->tx_xdp_err   += xdpsq_red_stats->err;
	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
	s->rx_xdp_tx_full  += xdpsq_stats->full;
	s->rx_xdp_tx_err   += xdpsq_stats->err;
	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit  += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full  += xsksq_stats->full;
	s->tx_xsk_err   += xsksq_stats->err;
	s->tx_xsk_cqes  += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_cache_reuse             += rq_stats->cache_reuse;
	s->rx_cache_full              += rq_stats->cache_full;
	s->rx_cache_empty             += rq_stats->cache_empty;
	s->rx_cache_busy              += rq_stats->cache_busy;
	s->rx_cache_waive             += rq_stats->cache_waive;
	s->rx_congst_umr              += rq_stats->congst_umr;
	s->rx_arfs_err                += rq_stats->arfs_err;
	s->rx_recover                 += rq_stats->recover;
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events      += ch_stats->events;
	s->ch_poll        += ch_stats->poll;
	s->ch_arm         += ch_stats->arm;
	s->ch_aff_change  += ch_stats->aff_change;
	s->ch_force_irq   += ch_stats->force_irq;
	s->ch_eq_rearm    += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets               += sq_stats->packets;
	s->tx_bytes                 += sq_stats->bytes;
	s->tx_tso_packets           += sq_stats->tso_packets;
	s->tx_tso_bytes             += sq_stats->tso_bytes;
	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
	s->tx_nop                   += sq_stats->nop;
	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped         += sq_stats->stopped;
	s->tx_queue_wake            += sq_stats->wake;
	s->tx_queue_dropped         += sq_stats->dropped;
	s->tx_cqe_err               += sq_stats->cqe_err;
	s->tx_recover               += sq_stats->recover;
	s->tx_xmit_more             += sq_stats->xmit_more;
	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
	s->tx_csum_none             += sq_stats->csum_none;
	s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes                  += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

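/* Aggregate all per-ring counters into priv->stats.sw. The struct is
 * zeroed first and then re-summed ring by ring; the barrier() calls at
 * each accumulation site work around a GCC issue (see the bugzilla link
 * above) that could otherwise inflate the stack frame.
 */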
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

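/* Device "Q counters": out-of-buffer drops accounted by the HW per
 * counter set, read with the QUERY_Q_COUNTER command. Each counter set
 * is exposed only if the corresponding counter ID was allocated.
 */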
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

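/* Query both counter sets with one command layout. The drop RQ's
 * out_of_buffer field counts traffic that arrived while the regular
 * queues were closed, hence it is reported as rx_if_down_packets.
 */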
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

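/* vNIC environment counters, read via the QUERY_VNIC_ENV command and
 * gated on the matching general capability bits.
 */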
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
		NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

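/* Per-vport traffic counters (unicast/multicast/broadcast, Ethernet and
 * RDMA), read via the QUERY_VPORT_COUNTER command as big-endian 64-bit
 * values.
 */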
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

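/* The pport groups below all read the PPCNT register, selecting a
 * counter group via the grp field. Each 64-bit counter is laid out as a
 * _high/_low dword pair, so byte offsets are taken at the c##_high half.
 */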
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

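/* Read one big-endian 64-bit PPCNT counter straight from a raw query
 * buffer, addressing it by its _high half as above.
 */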
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			      counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

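/* Standard ethtool stats callbacks: each getter re-queries the IEEE
 * 802.3 PPCNT group and copies the relevant fields into the ethtool
 * structure, leaving the stats untouched if the query fails.
 */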
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

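/* RFC 2863 (interface MIB) and RFC 2819 (RMON) PPCNT counter groups. */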
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

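/* Packet size histogram ranges matching the RFC 2819 bucket counters
 * read below; the list is terminated by a zeroed entry.
 */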
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};

void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name)						\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
			      eth_2819_cntrs_grp_data_layout,	\
			      name)

	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
	rmon->fragments		= RD(ether_stats_fragments);
	rmon->jabbers		= RD(ether_stats_jabbers);

	rmon->hist[0]		= RD(ether_stats_pkts64octets);
	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

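/* Physical layer counters. link_down_events comes from the base
 * physical layer group; the statistical group and its per-lane error
 * counters are PCAM feature-gated.
 */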
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

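/* FEC corrected-bits counter for the ethtool FEC statistics API, taken
 * from the physical layer statistical group.
 */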
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats)
{
	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

	fec_stats->corrected_bits.total =
		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
				      phys_layer_statistical_cntrs,
				      phy_corrected_bits);
}

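/* Ethernet extended counters; only rx_buffer_almost_full is exposed,
 * guarded by the rx_buffer_fullness_counters PCAM feature.
 */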
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

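/* PCIe performance counters, read from the MPCNT register; each subset
 * is guarded by its own MCAM feature bit.
 */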
1230 #define PCIE_PERF_OFF(c) \
1231 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1232 static const struct counter_desc pcie_perf_stats_desc[] = {
1233 	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1234 	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1235 };
1236 
1237 #define PCIE_PERF_OFF64(c) \
1238 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1239 static const struct counter_desc pcie_perf_stats_desc64[] = {
1240 	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1241 };
1242 
1243 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1244 	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1245 	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1246 	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1247 	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1248 };
1249 
1250 #define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
1251 #define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
1252 #define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1253 
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

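/* Per-traffic-class counters come from the PPCNT register, queried once
 * per priority/TC in the update handlers below; all fields in these
 * layouts are 64-bit (_high/_low pairs), hence the c##_high offsets.
 */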
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
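
/* pfcc_mask and stall_detect are single capability bits, so the stall
 * counters contribute either 0 or ARRAY_SIZE(pport_pfc_stall_stats_desc)
 * to the group total.
 */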
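
/* Returns a bitmap of priorities with PFC enabled in either direction
 * (tx | rx), or 0 on query failure / non-Ethernet ports.
 */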
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

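/* PFC counters are reported once per PFC-enabled priority, plus one
 * "global" set when plain pause is enabled; the global view reuses
 * per_prio_counters[0], which carries the port-wide pause counters.
 */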
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

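/* Port module event (PME) counters are maintained in software by the
 * core driver; the offsets below index u64 slots in the status/error
 * arrays of struct mlx5_pme_stats, not a firmware register layout.
 */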
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

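/* Per-ring software counters: each MLX5E_DECLARE_*_STAT() entry pairs an
 * ethtool format string with the field's offset inside the stats struct,
 * read later via MLX5E_READ_CTR64_CPU().
 */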
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};

static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};

static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS		ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)

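/* QoS (HTB) SQs are created on demand; max_qos_sqs is not expected to
 * shrink, and the acquire/release pairing below is meant to make the
 * qos_sq_stats array visible before the larger count is observed.
 */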
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				qos_sq_stats_desc[i].format, qid);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

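/* PTP counters exist only once a PTP channel has been opened: TX adds
 * per-TC SQ and CQ counter sets, RX adds a single RQ counter set.
 */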
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	int num = NUM_PTP_CH_STATS;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return 0;

	if (priv->tx_ptp_opened)
		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
	if (priv->rx_ptp_opened)
		num += NUM_PTP_RQ_STATS;

	return num;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			ptp_ch_stats_desc[i].format);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_sq_stats_desc[i].format, tc);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_cq_stats_desc[i].format, tc);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_rq_stats_desc[i].format);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
							     ptp_sq_stats_desc, i);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
							     ptp_cq_stats_desc, i);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

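/* The channels group sizes itself for max_nch channels and max_opened_tc
 * TCs so the ethtool string set stays stable across reconfiguration;
 * XSK counters are included once an XSK socket has ever been used.
 */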
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->max_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

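/* Instantiate the group descriptors; groups flagged with
 * MLX5E_NDO_UPDATE_STATS are additionally refreshed from the
 * ndo_get_stats64 path via mlx5e_stats_update_ndo_stats().
 */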
MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);

/* The stats groups are listed in the opposite order to their
 * update_stats() calls: the update loops iterate over the groups in
 * reverse.
 */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}