xref: /freebsd/sys/dev/mana/mana_sysctl.c (revision 516b5059)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "mana_sysctl.h"

/* Handler defined at the bottom of this file. */
static int mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS);

/*
 * Default log mask: alert, warning and info messages enabled.
 * Tunable/writable at runtime via hw.mana.log_level below.
 */
int mana_log_level = MANA_ALERT | MANA_WARNING | MANA_INFO;

/* Root of the driver's sysctl tree: hw.mana. */
SYSCTL_NODE(_hw, OID_AUTO, mana, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MANA driver parameters");

/*
 * Logging level for changing verbosity of the output
 */
SYSCTL_INT(_hw_mana, OID_AUTO, log_level, CTLFLAG_RWTUN,
    &mana_log_level, 0, "Logging level indicating verbosity of the logs");

SYSCTL_CONST_STRING(_hw_mana, OID_AUTO, driver_version, CTLFLAG_RD,
    DRV_MODULE_VERSION, "MANA driver version");
48 
49 static int
mana_sysctl_rx_stat_agg_u64(SYSCTL_HANDLER_ARGS)50 mana_sysctl_rx_stat_agg_u64(SYSCTL_HANDLER_ARGS)
51 {
52 	struct mana_port_context *apc = arg1;
53 	int offset = arg2, i, err;
54 	struct mana_rxq *rxq;
55 	uint64_t stat;
56 
57 	stat = 0;
58 	for (i = 0; i < apc->num_queues; i++) {
59 		rxq = apc->rxqs[i];
60 		stat += *((uint64_t *)((uint8_t *)rxq + offset));
61 	}
62 
63 	err = sysctl_handle_64(oidp, &stat, 0, req);
64 	if (err || req->newptr == NULL)
65 		return err;
66 
67 	for (i = 0; i < apc->num_queues; i++) {
68 		rxq = apc->rxqs[i];
69 		*((uint64_t *)((uint8_t *)rxq + offset)) = 0;
70 	}
71 	return 0;
72 }
73 
74 static int
mana_sysctl_rx_stat_u16(SYSCTL_HANDLER_ARGS)75 mana_sysctl_rx_stat_u16(SYSCTL_HANDLER_ARGS)
76 {
77 	struct mana_port_context *apc = arg1;
78 	int offset = arg2, err;
79 	struct mana_rxq *rxq;
80 	uint64_t stat;
81 	uint16_t val;
82 
83 	rxq = apc->rxqs[0];
84 	val = *((uint16_t *)((uint8_t *)rxq + offset));
85 	stat = val;
86 
87 	err = sysctl_handle_64(oidp, &stat, 0, req);
88 	if (err || req->newptr == NULL)
89 		return err;
90 	else
91 		return 0;
92 }
93 
94 static int
mana_sysctl_rx_stat_u32(SYSCTL_HANDLER_ARGS)95 mana_sysctl_rx_stat_u32(SYSCTL_HANDLER_ARGS)
96 {
97 	struct mana_port_context *apc = arg1;
98 	int offset = arg2, err;
99 	struct mana_rxq *rxq;
100 	uint64_t stat;
101 	uint32_t val;
102 
103 	rxq = apc->rxqs[0];
104 	val = *((uint32_t *)((uint8_t *)rxq + offset));
105 	stat = val;
106 
107 	err = sysctl_handle_64(oidp, &stat, 0, req);
108 	if (err || req->newptr == NULL)
109 		return err;
110 	else
111 		return 0;
112 }
113 
114 static int
mana_sysctl_tx_stat_agg_u64(SYSCTL_HANDLER_ARGS)115 mana_sysctl_tx_stat_agg_u64(SYSCTL_HANDLER_ARGS)
116 {
117 	struct mana_port_context *apc = arg1;
118 	int offset = arg2, i, err;
119 	struct mana_txq *txq;
120 	uint64_t stat;
121 
122 	stat = 0;
123 	for (i = 0; i < apc->num_queues; i++) {
124 		txq = &apc->tx_qp[i].txq;
125 		stat += *((uint64_t *)((uint8_t *)txq + offset));
126 	}
127 
128 	err = sysctl_handle_64(oidp, &stat, 0, req);
129 	if (err || req->newptr == NULL)
130 		return err;
131 
132 	for (i = 0; i < apc->num_queues; i++) {
133 		txq = &apc->tx_qp[i].txq;
134 		*((uint64_t *)((uint8_t *)txq + offset)) = 0;
135 	}
136 	return 0;
137 }
138 
139 void
mana_sysctl_add_port(struct mana_port_context * apc)140 mana_sysctl_add_port(struct mana_port_context *apc)
141 {
142 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
143 	device_t dev = gc->dev;
144 	struct sysctl_ctx_list *ctx;
145 	struct sysctl_oid *tree;
146 	struct sysctl_oid_list *child;
147 	struct mana_port_stats *port_stats;
148 	char node_name[32];
149 
150 	struct sysctl_oid *port_node, *stats_node;
151 	struct sysctl_oid_list *stats_list;
152 
153 	ctx = device_get_sysctl_ctx(dev);
154 	tree = device_get_sysctl_tree(dev);
155 	child = SYSCTL_CHILDREN(tree);
156 
157 	port_stats = &apc->port_stats;
158 
159 	snprintf(node_name, 32, "port%d", apc->port_idx);
160 
161 	port_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
162 	    node_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Port Name");
163 	apc->port_list = SYSCTL_CHILDREN(port_node);
164 
165 	SYSCTL_ADD_BOOL(ctx, apc->port_list, OID_AUTO,
166 	    "enable_altq", CTLFLAG_RW, &apc->enable_tx_altq, 0,
167 	    "Choose alternative txq under heavy load");
168 
169 	SYSCTL_ADD_PROC(ctx, apc->port_list, OID_AUTO,
170 	    "bind_cleanup_thread_cpu",
171 	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE,
172 	    apc, 0, mana_sysctl_cleanup_thread_cpu, "I",
173 	    "Bind cleanup thread to a cpu. 0 disables it.");
174 
175 	stats_node = SYSCTL_ADD_NODE(ctx, apc->port_list, OID_AUTO,
176 	    "port_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
177 	    "Statistics of port");
178 	stats_list = SYSCTL_CHILDREN(stats_node);
179 
180 	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_packets",
181 	    CTLFLAG_RD, &port_stats->rx_packets, "Packets received");
182 	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_packets",
183 	    CTLFLAG_RD, &port_stats->tx_packets, "Packets transmitted");
184 	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_bytes",
185 	    CTLFLAG_RD, &port_stats->rx_bytes, "Bytes received");
186 	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_bytes",
187 	    CTLFLAG_RD, &port_stats->tx_bytes, "Bytes transmitted");
188 	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_drops",
189 	    CTLFLAG_RD, &port_stats->rx_drops, "Receive packet drops");
190 	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_drops",
191 	    CTLFLAG_RD, &port_stats->tx_drops, "Transmit packet drops");
192 
193 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_queued",
194 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
195 	    __offsetof(struct mana_rxq, lro.lro_queued),
196 	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO queued");
197 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_flushed",
198 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
199 	    __offsetof(struct mana_rxq, lro.lro_flushed),
200 	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO flushed");
201 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_bad_csum",
202 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
203 	    __offsetof(struct mana_rxq, lro.lro_bad_csum),
204 	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO bad checksum");
205 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_tried",
206 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
207 	    __offsetof(struct mana_rxq, lro_tried),
208 	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO tried");
209 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_failed",
210 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
211 	    __offsetof(struct mana_rxq, lro_failed),
212 	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO failed");
213 
214 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_ackcnt_lim",
215 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
216 	    __offsetof(struct mana_rxq, lro.lro_ackcnt_lim),
217 	    mana_sysctl_rx_stat_u16,
218 	    "LU", "Max # of ACKs to be aggregated by LRO");
219 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_length_lim",
220 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
221 	    __offsetof(struct mana_rxq, lro.lro_length_lim),
222 	    mana_sysctl_rx_stat_u32,
223 	    "LU", "Max len of aggregated data in byte by LRO");
224 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_cnt",
225 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
226 	    __offsetof(struct mana_rxq, lro.lro_cnt),
227 	    mana_sysctl_rx_stat_u32,
228 	    "LU", "Max # or LRO packet count");
229 
230 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_packets",
231 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
232 	    __offsetof(struct mana_txq, tso_pkts),
233 	    mana_sysctl_tx_stat_agg_u64, "LU", "TSO packets");
234 	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_bytes",
235 	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
236 	    __offsetof(struct mana_txq, tso_bytes),
237 	    mana_sysctl_tx_stat_agg_u64, "LU", "TSO bytes");
238 }
239 
240 void
mana_sysctl_add_queues(struct mana_port_context * apc)241 mana_sysctl_add_queues(struct mana_port_context *apc)
242 {
243 	struct sysctl_ctx_list *ctx = &apc->que_sysctl_ctx;
244 	struct sysctl_oid_list *child = apc->port_list;
245 
246 	struct sysctl_oid *queue_node, *tx_node, *rx_node;
247 	struct sysctl_oid_list *queue_list, *tx_list, *rx_list;
248 	struct mana_txq *txq;
249 	struct mana_rxq *rxq;
250 	struct mana_stats *tx_stats, *rx_stats;
251 	char que_name[32];
252 	int i;
253 
254 	sysctl_ctx_init(ctx);
255 
256 	for (i = 0; i < apc->num_queues; i++) {
257 		rxq = apc->rxqs[i];
258 		txq = &apc->tx_qp[i].txq;
259 
260 		snprintf(que_name, 32, "queue%d", i);
261 
262 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
263 		    que_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
264 		queue_list = SYSCTL_CHILDREN(queue_node);
265 
266 		/* TX stats */
267 		tx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO,
268 		    "txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX queue");
269 		tx_list = SYSCTL_CHILDREN(tx_node);
270 
271 		tx_stats = &txq->stats;
272 
273 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "count",
274 		    CTLFLAG_RD, &tx_stats->packets, "Packets sent");
275 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "bytes",
276 		    CTLFLAG_RD, &tx_stats->bytes, "Bytes sent");
277 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_wakeups",
278 		    CTLFLAG_RD, &tx_stats->wakeup, "Queue wakeups");
279 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_stops",
280 		    CTLFLAG_RD, &tx_stats->stop, "Queue stops");
281 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "mbuf_collapse",
282 		    CTLFLAG_RD, &tx_stats->collapse, "Mbuf collapse count");
283 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
284 		    "mbuf_collapse_err", CTLFLAG_RD,
285 		    &tx_stats->collapse_err, "Mbuf collapse failures");
286 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
287 		    "dma_mapping_err", CTLFLAG_RD,
288 		    &tx_stats->dma_mapping_err, "DMA mapping failures");
289 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
290 		    "alt_chg", CTLFLAG_RD,
291 		    &tx_stats->alt_chg, "Switch to alternative txq");
292 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
293 		    "alt_reset", CTLFLAG_RD,
294 		    &tx_stats->alt_reset, "Reset to self txq");
295 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
296 		    "cqe_err", CTLFLAG_RD,
297 		    &tx_stats->cqe_err, "Error CQE count");
298 		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
299 		    "cqe_unknown_type", CTLFLAG_RD,
300 		    &tx_stats->cqe_unknown_type, "Unknown CQE count");
301 
302 		/* RX stats */
303 		rx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO,
304 		    "rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX queue");
305 		rx_list = SYSCTL_CHILDREN(rx_node);
306 
307 		rx_stats = &rxq->stats;
308 
309 		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "count",
310 		    CTLFLAG_RD, &rx_stats->packets, "Packets received");
311 		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "bytes",
312 		    CTLFLAG_RD, &rx_stats->bytes, "Bytes received");
313 		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
314 		    "mbuf_alloc_fail", CTLFLAG_RD,
315 		    &rx_stats->mbuf_alloc_fail, "Failed mbuf allocs");
316 		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
317 		    "dma_mapping_err", CTLFLAG_RD,
318 		    &rx_stats->dma_mapping_err, "DMA mapping errors");
319 	}
320 }
321 
322 /*
323  * Free all queues' sysctl trees attached to the port's tree.
324  */
void
mana_sysctl_free_queues(struct mana_port_context *apc)
{
	/*
	 * Tearing down the per-queue context removes every OID that
	 * mana_sysctl_add_queues() created, while leaving the port's
	 * own subtree (owned by the device's sysctl context) intact.
	 */
	sysctl_ctx_free(&apc->que_sysctl_ctx);
}
330 
331 static int
mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS)332 mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS)
333 {
334 	struct mana_port_context *apc = arg1;
335 	bool bind_cpu = false;
336 	uint8_t val;
337 	int err;
338 
339 	val = 0;
340 	err = sysctl_wire_old_buffer(req, sizeof(val));
341 	if (err == 0) {
342 		val = apc->bind_cleanup_thread_cpu;
343 		err = sysctl_handle_8(oidp, &val, 0, req);
344 	}
345 
346 	if (err != 0 || req->newptr == NULL)
347 		return (err);
348 
349 	if (val != 0)
350 		bind_cpu = true;
351 
352 	if (bind_cpu != apc->bind_cleanup_thread_cpu) {
353 		apc->bind_cleanup_thread_cpu = bind_cpu;
354 		err = mana_restart(apc);
355 	}
356 
357 	return (err);
358 }
359