xref: /freebsd/sys/dev/cxgbe/t4_tracer.c (revision d0b2dbfa)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "t4_ioctl.h"

/*
 * Locking notes
 * =============
 *
 * An interface cloner is registered during mod_load and it can be used to
 * create or destroy the tracing ifnet for an adapter at any time.  It is
 * possible for the cloned interface to outlive the adapter (adapter disappears
 * in t4_detach but the tracing ifnet may live till mod_unload when removal of
 * the cloner finally destroys any remaining cloned interfaces).  When tracing
 * filters are active, this ifnet is also receiving data.  There are potential
 * bad races between ifnet create, ifnet destroy, ifnet rx, ifnet ioctl,
 * cxgbe_detach/t4_detach, mod_unload.
 *
 * a) The driver selects an iq for tracing (sc->traceq) inside a synch op.  The
 *    iq is destroyed inside a synch op too (and sc->traceq updated).
 * b) The cloner looks for an adapter that matches the name of the ifnet it's
 *    been asked to create, starts a synch op on that adapter, and proceeds only
 *    if the adapter has a tracing iq.
 * c) The cloned ifnet and the adapter are coupled to each other via
 *    ifp->if_softc and sc->ifp.  These can be modified only with both the
 *    global t4_trace_lock sx and the sc->ifp_lock mutex held.  Holding either
 *    of these will prevent any change.
 *
 * The order in which all the locks involved should be acquired is:
 * t4_list_lock
 * adapter lock
 * (begin synch op and let go of the above two)
 * t4_trace_lock
 * sc->ifp_lock
 */

static struct sx t4_trace_lock;
static const char *t4_cloner_name = "tXnex";
static struct if_clone *t4_cloner;

/* tracer ifnet routines.  mostly no-ops. */
static void tracer_init(void *);
static int tracer_ioctl(if_t, unsigned long, caddr_t);
static int tracer_transmit(if_t, struct mbuf *);
static void tracer_qflush(if_t);
static int tracer_media_change(if_t);
static void tracer_media_status(if_t, struct ifmediareq *);

/* match name (request/response) */
struct match_rr {
	const char *name;
	int lock;	/* set to 1 to return the sc locked. */
	struct adapter *sc;
	int rc;
};

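/*
 * t4_iterate callback: find the adapter whose nameunit matches mrr->name and,
 * if requested, begin a synchronized operation on it before returning.
 */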
static void
match_name(struct adapter *sc, void *arg)
{
	struct match_rr *mrr = arg;

	if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
		return;

	KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
	    __func__, mrr->sc, sc, mrr->name));

	mrr->sc = sc;
	if (mrr->lock)
		mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
	else
		mrr->rc = 0;
}

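/*
 * The cloner matches only names of the form t4nex<unit>, t5nex<unit>, or
 * t6nex<unit>.
 */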
static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

	if (strncmp(name, "t4nex", 5) != 0 &&
	    strncmp(name, "t5nex", 5) != 0 &&
	    strncmp(name, "t6nex", 5) != 0)
		return (0);
	if (name[5] < '0' || name[5] > '9')
		return (0);
	return (1);
}

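/*
 * Create the tracing ifnet for the named adapter.  Fails with ENOENT if there
 * is no such adapter, EAGAIN if the adapter has no tracing queue, and EEXIST
 * if a tracer ifnet is already attached to it.
 */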
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	if_t ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}

	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	if_initname(ifp, name, unit);
	if_setdname(ifp, t4_cloner_name);
	if_setinitfn(ifp, tracer_init);
	if_setflags(ifp, IFF_SIMPLEX | IFF_DRV_RUNNING);
	if_setioctlfn(ifp, tracer_ioctl);
	if_settransmitfn(ifp, tracer_transmit);
	if_setqflushfn(ifp, tracer_qflush);
	if_setcapabilities(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU);
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	mtx_lock(&sc->ifp_lock);
	if_setsoftc(ifp, sc);
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}

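/*
 * Destroy a cloned tracing ifnet.  If it is still coupled to an adapter the
 * coupling is broken first so neither side is left with a stale pointer.
 */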
static int
t4_cloner_destroy(struct if_clone *ifc, if_t ifp)
{
	struct adapter *sc;
	int unit = if_getdunit(ifp);

	sx_xlock(&t4_trace_lock);
	sc = if_getsoftc(ifp);
	if (sc != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp = NULL;
		if_setsoftc(ifp, NULL);
		mtx_unlock(&sc->ifp_lock);
		ifmedia_removeall(&sc->media);
	}
	ether_ifdetach(ifp);
	if_free(ifp);
	ifc_free_unit(ifc, unit);
	sx_xunlock(&t4_trace_lock);

	return (0);
}

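/*
 * Module load-time setup: initialize the tracer lock and register the
 * interface cloner that creates/destroys tracing ifnets.
 */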
void
t4_tracer_modload(void)
{

	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
	    t4_cloner_create, t4_cloner_destroy);
}

void
t4_tracer_modunload(void)
{

	if (t4_cloner != NULL) {
		/*
		 * The module is being unloaded so the nexus drivers have
		 * detached.  The tracing interfaces can not outlive the nexus
		 * (ifp->if_softc is the nexus) and must have been destroyed
		 * already.  XXX: but if_clone is opaque to us and we can't
		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
		 */
		if_clone_detach(t4_cloner);
	}
	sx_destroy(&t4_trace_lock);
}

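/*
 * Break the coupling between the adapter and its tracing ifnet (if any).  The
 * cloned ifnet may outlive the adapter after this.
 */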
void
t4_tracer_port_detach(struct adapter *sc)
{

	sx_xlock(&t4_trace_lock);
	if (sc->ifp != NULL) {
		mtx_lock(&sc->ifp_lock);
		if_setsoftc(sc->ifp, NULL);
		sc->ifp = NULL;
		mtx_unlock(&sc->ifp_lock);
	}
	ifmedia_removeall(&sc->media);
	sx_xunlock(&t4_trace_lock);
}

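/*
 * Report the first valid tracing filter at or after t->idx.  t->idx is set to
 * 0xff (and t->valid to 0) if there is no such filter.
 */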
int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc, i, enabled;
	struct trace_params tp;

	if (t->idx >= NTRACE) {
		t->idx = 0xff;
		t->enabled = 0;
		t->valid = 0;
		return (0);
	}

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4gett");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	for (i = t->idx; i < NTRACE; i++) {
		if (isset(&sc->tracer_valid, i)) {
			t4_get_trace_filter(sc, &tp, i, &enabled);
			t->idx = i;
			t->enabled = enabled;
			t->valid = 1;
			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
			t->tp.snap_len = tp.snap_len;
			t->tp.min_len = tp.min_len;
			t->tp.skip_ofst = tp.skip_ofst;
			t->tp.skip_len = tp.skip_len;
			t->tp.invert = tp.invert;

			/* convert channel to port iff 0 <= port < 8. */
			if (tp.port < 4)
				t->tp.port = sc->chan_map[tp.port];
			else if (tp.port < 8)
				t->tp.port = sc->chan_map[tp.port - 4] + 4;
			else
				t->tp.port = tp.port;

			goto done;
		}
	}

	t->idx = 0xff;
	t->enabled = 0;
	t->valid = 0;
done:
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

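/*
 * Install, update, or enable/disable the tracing filter at t->idx.  The MPS
 * trace engine (TRCEN) is turned on when the first tracer is enabled and off
 * when the last one is disabled.
 */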
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc;
	struct trace_params tp, *tpp;

	if (t->idx >= NTRACE)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4sett");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	/*
	 * If no tracing filter is specified this time then check if the filter
	 * at the index is valid anyway because it was set previously.  If so
	 * then this is a legitimate enable/disable operation.
	 */
	if (t->valid == 0) {
		if (isset(&sc->tracer_valid, t->idx))
			tpp = NULL;
		else
			rc = EINVAL;
		goto done;
	}

	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
	    t->tp.skip_ofst > M_TFOFFSET) {
		rc = EINVAL;
		goto done;
	}

	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
	tp.snap_len = t->tp.snap_len;
	tp.min_len = t->tp.min_len;
	tp.skip_ofst = t->tp.skip_ofst;
	tp.skip_len = t->tp.skip_len;
	tp.invert = !!t->tp.invert;

	/* convert port to channel iff 0 <= port < 8. */
	if (t->tp.port < 4) {
		if (sc->port[t->tp.port] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port]->tx_chan;
	} else if (t->tp.port < 8) {
		if (sc->port[t->tp.port - 4] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
	} else
		tp.port = t->tp.port;
	tpp = &tp;
done:
	if (rc == 0) {
		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
		if (rc == 0) {
			if (t->enabled) {
				setbit(&sc->tracer_valid, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, F_TRCEN);
				}
				setbit(&sc->tracer_enabled, t->idx);
			} else {
				clrbit(&sc->tracer_enabled, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, 0);
				}
			}
		}
	}
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

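/*
 * Receive handler for trace packets on a T4: strip the CPL header, hand the
 * frame to BPF via the tracing ifnet, and free the mbuf.
 */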
int
t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	if_t ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

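/*
 * Same as t4_trace_pkt but for T5 and later chips, which prepend a different
 * trace header (struct cpl_t5_trace_pkt) that must be stripped instead.
 */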
int
t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	if_t ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_t5_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

static void
tracer_init(void *arg)
{

	return;
}

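/*
 * Only the media ioctls need the adapter softc; everything else is either
 * accepted as a no-op or punted to ether_ioctl().
 */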
static int
tracer_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0;
	struct adapter *sc;
	struct ifreq *ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCSIFMTU:
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFCAP:
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		sx_xlock(&t4_trace_lock);
		sc = if_getsoftc(ifp);
		if (sc == NULL)
			rc = EIO;
		else
			rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		sx_xunlock(&t4_trace_lock);
		break;
	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
tracer_transmit(if_t ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
tracer_qflush(if_t ifp)
{

	return;
}

static int
tracer_media_change(if_t ifp)
{

	return (EOPNOTSUPP);
}

static void
tracer_media_status(if_t ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;

	return;
}