/* packet-xip.c
 * Routines for XIP dissection
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * The eXpressive Internet Protocol (XIP) is the network layer protocol for
 * the eXpressive Internet Architecture (XIA), a future Internet architecture
 * project. The addresses in XIP are directed acyclic graphs, so some of the
 * code in this file verifies the correctness of the DAGs and displays them
 * in human-readable form.
 *
 * More information about XIA can be found here:
 *  https://www.cs.cmu.edu/~xia/
 *
 * And here:
 *  https://github.com/AltraMayor/XIA-for-Linux/wiki
 *
 * More information about the format of the DAG can be found here:
 *  https://github.com/AltraMayor/XIA-for-Linux/wiki/Human-readable-XIP-address-format
 */

#include "config.h"
#include <epan/packet.h>
#include <epan/expert.h>

void proto_register_xip(void);
void proto_reg_handoff_xip(void);

/* Next dissector handles. */
static dissector_handle_t xip_serval_handle;

static gint proto_xip			= -1;

static gint hf_xip_version		= -1;
static gint hf_xip_next_hdr		= -1;
static gint hf_xip_payload_len		= -1;
static gint hf_xip_hop_limit		= -1;
static gint hf_xip_num_dst		= -1;
static gint hf_xip_num_src		= -1;
static gint hf_xip_last_node		= -1;
static gint hf_xip_dst_dag		= -1;
static gint hf_xip_dst_dag_entry	= -1;
static gint hf_xip_src_dag		= -1;
static gint hf_xip_src_dag_entry	= -1;

static gint ett_xip_tree		= -1;
static gint ett_xip_ddag		= -1;
static gint ett_xip_sdag		= -1;

static expert_field ei_xip_invalid_len = EI_INIT;
static expert_field ei_xip_next_header = EI_INIT;
static expert_field ei_xip_bad_num_dst = EI_INIT;
static expert_field ei_xip_bad_num_src = EI_INIT;

static dissector_handle_t xip_handle;

/* XIA principals. */
#define XIDTYPE_NAT		0x00
#define XIDTYPE_AD		0x10
#define XIDTYPE_HID		0x11
#define XIDTYPE_CID		0x12
#define XIDTYPE_SID		0x13
#define XIDTYPE_UNI4ID		0x14
#define XIDTYPE_I4ID		0x15
#define XIDTYPE_U4ID		0x16
#define XIDTYPE_XDP		0x17
#define XIDTYPE_SRVCID		0x18
#define XIDTYPE_FLOWID		0x19
#define XIDTYPE_ZF		0x20

/* Principal string values. */
static const value_string xidtype_vals[] = {
	{ XIDTYPE_AD,		"ad" },
	{ XIDTYPE_HID,		"hid" },
	{ XIDTYPE_CID,		"cid" },
	{ XIDTYPE_SID,		"sid" },
	{ XIDTYPE_UNI4ID,	"uni4id" },
	{ XIDTYPE_I4ID,		"i4id" },
	{ XIDTYPE_U4ID,		"u4id" },
	{ XIDTYPE_XDP,		"xdp" },
	{ XIDTYPE_SRVCID,	"serval" },
	{ XIDTYPE_FLOWID,	"flowid" },
	{ XIDTYPE_ZF,		"zf" },
	{ 0,			NULL }
};

enum xia_addr_error {
	/* There's a non-XIDTYPE_NAT node after an XIDTYPE_NAT node. */
	XIAEADDR_NAT_MISPLACED = 1,
	/* The edge-selected bit is only valid in packets. */
	XIAEADDR_CHOSEN_EDGE,
	/* There's a non-empty edge after an empty edge.
	 * This error can also occur if an empty edge is selected. */
	XIAEADDR_EE_MISPLACED,
	/* An edge of a node is out of range. */
	XIAEADDR_EDGE_OUT_RANGE,
	/* The nodes are not in topological order. Notice that being in
	 * topological order guarantees that the graph is acyclic, and it
	 * is a simple, cheap test. */
	XIAEADDR_NOT_TOPOLOGICAL,
	/* The graph is not a single connected component. */
	XIAEADDR_MULTI_COMPONENTS,
	/* The entry node is not present. */
	XIAEADDR_NO_ENTRY
};

/* Maximum number of nodes in a DAG. */
#define XIA_NODES_MAX		9

/* Number of outgoing edges for each node. */
#define XIA_OUTDEGREE_MAX	4

/* Sizes of an XIA node and its components. */
#define XIA_TYPE_SIZE		4
#define XIA_XID_SIZE		20
#define XIA_EDGES_SIZE		4
#define XIA_NODE_SIZE		(XIA_TYPE_SIZE + XIA_XID_SIZE + XIA_EDGES_SIZE)

/* Split the XID up into 4-byte chunks. */
#define XIA_XID_CHUNK_SIZE	4

typedef guint32 xid_type_t;

struct xia_xid {
	/* XID type. */
	xid_type_t	xid_type;

	/* XID, represented as 4-byte integers. */
	guint32		xid_id[XIA_XID_SIZE / XIA_XID_CHUNK_SIZE];
};

struct xia_row {
	struct xia_xid	s_xid;
	/* Outgoing edges. */
	union {
		guint8	a[XIA_OUTDEGREE_MAX];
		guint32	i;
	} s_edge;
};

struct xia_addr {
	struct xia_row s_row[XIA_NODES_MAX];
};

/* XIA_MAX_STRADDR_SIZE - The maximum size, in bytes, of an XIA address as a
 * string. It is the recommended buffer size to call xia_ntop with. It
 * includes space for an invalid sign (i.e. '!'), each node's type and ID in
 * hexadecimal, the out-edges, the two separators (i.e. '-') per node,
 * the edge-chosen sign (i.e. '>') for each selected edge,
 * the node separators (i.e. ':' or ":\n"), a string terminator (i.e. '\0'),
 * and an extra '\n' at the end that the caller may want to add.
 */
#define MAX_PPAL_NAME_SIZE	32
#define XIA_MAX_STRID_SIZE	(XIA_XID_SIZE * 2 + 1)
#define XIA_MAX_STRXID_SIZE	(MAX_PPAL_NAME_SIZE + XIA_MAX_STRID_SIZE)
#define XIA_MAX_STRADDR_SIZE	(1 + XIA_NODES_MAX * \
	(XIA_MAX_STRXID_SIZE + XIA_OUTDEGREE_MAX * 2 + 2) + 1)
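
/* For illustration only (IDs shortened here): a well-formed two-node
 * address, an AD node followed by an HID sink, would be rendered by
 * xia_ntop() below roughly as
 *
 *	ad-0123456789abcdef...76543210-1:
 *	hid-fedcba9876543210...89abcdef-0
 *
 * where the digit after the trailing '-' is an out-edge index, '>' would
 * mark a chosen edge, '*' an empty edge, and a leading '!' an address that
 * failed validation.
 */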

/*
 *	Validating addresses
 */

#define XIA_CHOSEN_EDGE		0x80
#define XIA_EMPTY_EDGE		0x7f
#define XIA_ENTRY_NODE_INDEX	0x7e

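/* The four out-edge bytes of a row packed into one 32-bit word, so that a
 * whole row of edges can be tested at once.
 */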
#define XIA_EMPTY_EDGES (XIA_EMPTY_EDGE << 24 | XIA_EMPTY_EDGE << 16 |\
			 XIA_EMPTY_EDGE <<  8 | XIA_EMPTY_EDGE)
#define XIA_CHOSEN_EDGES (XIA_CHOSEN_EDGE << 24 | XIA_CHOSEN_EDGE << 16 |\
			 XIA_CHOSEN_EDGE <<  8 | XIA_CHOSEN_EDGE)

static inline gint
is_edge_chosen(guint8 e)
{
	return e & XIA_CHOSEN_EDGE;
}

static inline gint
is_any_edge_chosen(const struct xia_row *row)
{
	return row->s_edge.i & XIA_CHOSEN_EDGES;
}

static inline gint
is_empty_edge(guint8 e)
{
	return (e & XIA_EMPTY_EDGE) == XIA_EMPTY_EDGE;
}

static inline gint
xia_is_nat(xid_type_t ty)
{
	return ty == XIDTYPE_NAT;
}

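/* xia_are_edges_valid - validate the out-edges of one row.
 * @row is the row being checked, @node its index, @num_node the number of
 * nodes in the address, and @pvisited a bitmap that accumulates the indexes
 * of the nodes referenced by any edge. Returns 0 on success or a negative
 * enum xia_addr_error value.
 */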
static gint
xia_are_edges_valid(const struct xia_row *row,
	guint8 node, guint8 num_node, guint32 *pvisited)
{
	const guint8 *edge;
	guint32 all_edges, bits;
	gint i;

	if (is_any_edge_chosen(row)) {
		/* Since at least one edge of last_node has already
		 * been chosen, the address is corrupted.
		 */
		return -XIAEADDR_CHOSEN_EDGE;
	}

	edge = row->s_edge.a;
	all_edges = g_ntohl(row->s_edge.i);
	bits = 0xffffffff;
	for (i = 0; i < XIA_OUTDEGREE_MAX; i++, edge++) {
		guint8 e;
		e = *edge;
		if (e == XIA_EMPTY_EDGE) {
			if ((all_edges & bits) !=
				(XIA_EMPTY_EDGES & bits))
				return -XIAEADDR_EE_MISPLACED;
			else
				break;
		} else if (e >= num_node) {
			return -XIAEADDR_EDGE_OUT_RANGE;
		} else if (node < (num_node - 1) && e <= node) {
			/* Notice that if (node == XIA_ENTRY_NODE_INDEX)
			 * it still works fine because XIA_ENTRY_NODE_INDEX
			 * is greater than (num_node - 1).
			 */
			return -XIAEADDR_NOT_TOPOLOGICAL;
		}
		bits >>= 8;
		*pvisited |= 1 << e;
	}
	return 0;
}

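/* xia_test_addr - validate an XIA address.
 * Returns the number of nodes in @addr if the address is well formed,
 * otherwise a negative enum xia_addr_error value.
 */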
static gint
xia_test_addr(const struct xia_addr *addr)
{
	gint i, n;
	gint saw_nat = 0;
	guint32 visited = 0;

	/* Test that XIDTYPE_NAT is present only in the last rows. */
	n = XIA_NODES_MAX;
	for (i = 0; i < XIA_NODES_MAX; i++) {
		xid_type_t ty;
		ty = addr->s_row[i].s_xid.xid_type;
		if (saw_nat) {
			if (!xia_is_nat(ty))
				return -XIAEADDR_NAT_MISPLACED;
		} else if (xia_is_nat(ty)) {
			n = i;
			saw_nat = 1;
		}
	}
	/* From here on, n is the number of nodes. */

	/* Test edges are well formed. */
	for (i = 0; i < n; i++) {
		gint rc;
		rc = xia_are_edges_valid(&addr->s_row[i], i, n, &visited);
		if (rc)
			return rc;
	}

	if (n >= 1) {
		/* Test that the entry point is present. This is just a
		 * friendlier error; the same condition would otherwise be
		 * reported as XIAEADDR_MULTI_COMPONENTS.
		 */
		guint32 all_edges;
		all_edges = addr->s_row[n - 1].s_edge.i;
		if (all_edges == XIA_EMPTY_EDGES)
			return -XIAEADDR_NO_ENTRY;

		if (visited != ((1U << n) - 1))
			return -XIAEADDR_MULTI_COMPONENTS;
	}

	return n;
}

/*
 *	Printing addresses out
 */

#define INDEX_BASE 36

static inline gchar
edge_to_char(guint8 e)
{
	const gchar *ch_edge = "0123456789abcdefghijklmnopqrstuvwxyz";
	e &= ~XIA_CHOSEN_EDGE;
	if (e < INDEX_BASE)
		return ch_edge[e];
	else if (is_empty_edge(e))
		return '*';
	else
		return '+';
}

static void
add_edges_to_buf(gint valid, wmem_strbuf_t *buf, const guint8 *edges)
{
	gint i;
	wmem_strbuf_append_c(buf, '-');
	for (i = 0; i < XIA_OUTDEGREE_MAX; i++) {
		if (valid && edges[i] == XIA_EMPTY_EDGE)
			return;

		if (is_edge_chosen(edges[i]))
			wmem_strbuf_append_c(buf, '>');

		wmem_strbuf_append_c(buf, edge_to_char(edges[i]));
	}
}

static void
add_type_to_buf(xid_type_t ty, wmem_strbuf_t *buf)
{
	const gchar *xid_name;
	gsize buflen = wmem_strbuf_get_len(buf);

	if (XIA_MAX_STRADDR_SIZE - buflen - 1 < MAX_PPAL_NAME_SIZE)
		return;

	xid_name = try_val_to_str(ty, xidtype_vals);
	if (xid_name)
		wmem_strbuf_append_printf(buf, "%s-", xid_name);
	else
		wmem_strbuf_append_printf(buf, "0x%x-", ty);
}

static inline void
add_id_to_buf(const struct xia_xid *src, wmem_strbuf_t *buf)
{
	wmem_strbuf_append_printf(buf, "%08x%08x%08x%08x%08x",
		src->xid_id[0],
		src->xid_id[1],
		src->xid_id[2],
		src->xid_id[3],
		src->xid_id[4]);
}

/* xia_ntop - convert an XIA address to a string.
 * @src can be ill-formed, but xia_ntop won't report an error and will return
 * a string that approximates that ill-formed address.
 */
static int
xia_ntop(const struct xia_addr *src, wmem_strbuf_t *buf)
{
	gint valid, i;

	valid = xia_test_addr(src) >= 1;
	if (!valid)
		wmem_strbuf_append_c(buf, '!');

	for (i = 0; i < XIA_NODES_MAX; i++) {
		const struct xia_row *row = &src->s_row[i];

		if (xia_is_nat(row->s_xid.xid_type))
			break;

		if (i > 0)
			wmem_strbuf_append(buf, ":\n");

		/* Add the type, ID, and edges for this node. */
		add_type_to_buf(row->s_xid.xid_type, buf);
		add_id_to_buf(&row->s_xid, buf);
		add_edges_to_buf(valid, buf, row->s_edge.a);
	}

	return 0;
}

/*
 *	Dissection
 */

#define XIPH_MIN_LEN		36
#define ETHERTYPE_XIP		0xC0DE
#define XIA_NEXT_HEADER_DATA	0

/* Offsets of XIP fields in bytes. */
#define XIPH_VERS		0
#define XIPH_NXTH		1
#define XIPH_PLEN		2
#define XIPH_HOPL		4
#define XIPH_NDST		5
#define XIPH_NSRC		6
#define XIPH_LSTN		7
#define XIPH_DSTD		8
static void
construct_dag(tvbuff_t *tvb, packet_info *pinfo, proto_tree *xip_tree,
	const gint ett, const gint hf, const gint hf_entry,
	const guint8 num_nodes, guint8 offset)
{
	proto_tree *dag_tree;
	proto_item *ti;
	struct xia_addr dag;
	wmem_strbuf_t *buf;
	const gchar *dag_str;
	guint i, j;
	guint8 dag_offset = offset;

	ti = proto_tree_add_item(xip_tree, hf, tvb, offset,
		num_nodes * XIA_NODE_SIZE, ENC_BIG_ENDIAN);

	buf = wmem_strbuf_sized_new(pinfo->pool,
		XIA_MAX_STRADDR_SIZE, XIA_MAX_STRADDR_SIZE);

	dag_tree = proto_item_add_subtree(ti, ett);

	memset(&dag, 0, sizeof(dag));
	for (i = 0; i < num_nodes; i++) {
		struct xia_row *row = &dag.s_row[i];

		row->s_xid.xid_type = tvb_get_ntohl(tvb, offset);
		offset += XIA_TYPE_SIZE;

		/* Process the ID 32 bits at a time. */
		for (j = 0; j < XIA_XID_SIZE / XIA_XID_CHUNK_SIZE; j++) {
			row->s_xid.xid_id[j] = tvb_get_ntohl(tvb, offset);
			offset += XIA_XID_CHUNK_SIZE;
		}

		/* Need to process the edges byte-by-byte,
		 * so keep the bytes in network order.
		 */
		tvb_memcpy(tvb, row->s_edge.a, offset, XIA_EDGES_SIZE);
		offset += XIA_EDGES_SIZE;
	}

	xia_ntop(&dag, buf);
	dag_str = wmem_strbuf_get_str(buf);
	proto_tree_add_string_format(dag_tree, hf_entry, tvb, dag_offset,
		XIA_NODE_SIZE * num_nodes, dag_str, "%s", dag_str);
}

static gint
dissect_xip_sink_node(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
	gint offset, guint8 sink_node)
{
	tvbuff_t *next_tvb;

	switch (sink_node) {
	/* Serval XID types. */
	case XIDTYPE_FLOWID:
	case XIDTYPE_SRVCID:
		next_tvb = tvb_new_subset_remaining(tvb, offset);
		return call_dissector(xip_serval_handle, next_tvb, pinfo, tree);
	/* No special sink processing. */
	default:
		return 0;
	}
}

static gint
dissect_xip_next_header(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
	proto_item *next_ti, gint offset)
{
	tvbuff_t *next_tvb;
	guint8 next_header = tvb_get_guint8(tvb, XIPH_NXTH);

	switch (next_header) {
	case XIA_NEXT_HEADER_DATA:
		next_tvb = tvb_new_subset_remaining(tvb, offset);
		return call_data_dissector(next_tvb, pinfo, tree);
	default:
		expert_add_info_format(pinfo, next_ti, &ei_xip_next_header,
		 "Unrecognized next header type: 0x%02x", next_header);
		return 0;
	}
}

static void
display_xip(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
	proto_tree *xip_tree = NULL;

	proto_item *ti = NULL;
	proto_item *payload_ti = NULL;
	proto_item *next_ti = NULL;
	proto_item *num_ti = NULL;

	gint offset;
	guint16 xiph_len, payload_len;
	guint8 num_dst_nodes, num_src_nodes, last_node;

	num_dst_nodes = tvb_get_guint8(tvb, XIPH_NDST);
	num_src_nodes = tvb_get_guint8(tvb, XIPH_NSRC);
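	/* Header length: XIPH_DSTD (8) fixed bytes plus one XIA_NODE_SIZE
	 * (28 byte) row per destination and source DAG node.
	 */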
	xiph_len = 8 + (XIA_NODE_SIZE * num_dst_nodes) +
		(XIA_NODE_SIZE * num_src_nodes);

	/* Construct protocol tree. */
	ti = proto_tree_add_item(tree, proto_xip, tvb, 0, xiph_len, ENC_NA);
	xip_tree = proto_item_add_subtree(ti, ett_xip_tree);

	/* Add XIP version. */
	proto_tree_add_item(xip_tree, hf_xip_version, tvb,
		XIPH_VERS, 1, ENC_BIG_ENDIAN);

	/* Add XIP next header. */
	next_ti = proto_tree_add_item(xip_tree, hf_xip_next_hdr, tvb,
		XIPH_NXTH, 1, ENC_BIG_ENDIAN);

	/* Add XIP payload length. */
	payload_len = tvb_get_ntohs(tvb, XIPH_PLEN);
	payload_ti = proto_tree_add_uint_format(xip_tree, hf_xip_payload_len,
		tvb, XIPH_PLEN, 2, payload_len, "Payload Length: %u bytes",
		payload_len);
	if (tvb_captured_length_remaining(tvb, xiph_len) != payload_len)
		expert_add_info_format(pinfo, payload_ti, &ei_xip_invalid_len,
		"Payload length field (%d bytes) does not match actual payload length (%d bytes)",
		payload_len, tvb_captured_length_remaining(tvb, xiph_len));

	/* Add XIP hop limit. */
	proto_tree_add_item(xip_tree, hf_xip_hop_limit, tvb,
		XIPH_HOPL, 1, ENC_BIG_ENDIAN);

	/* Add XIP number of destination DAG nodes. */
	num_ti = proto_tree_add_item(xip_tree, hf_xip_num_dst, tvb,
		XIPH_NDST, 1, ENC_BIG_ENDIAN);
	if (num_dst_nodes > XIA_NODES_MAX) {
		expert_add_info_format(pinfo, num_ti, &ei_xip_bad_num_dst,
		"The number of destination DAG nodes (%d) must not exceed XIA_NODES_MAX (%d)",
		num_dst_nodes, XIA_NODES_MAX);
		num_dst_nodes = XIA_NODES_MAX;
	}

	/* Add XIP number of source DAG nodes. */
	num_ti = proto_tree_add_item(xip_tree, hf_xip_num_src, tvb,
		XIPH_NSRC, 1, ENC_BIG_ENDIAN);
	if (num_src_nodes > XIA_NODES_MAX) {
		expert_add_info_format(pinfo, num_ti, &ei_xip_bad_num_src,
		"The number of source DAG nodes (%d) must not exceed XIA_NODES_MAX (%d)",
		num_src_nodes, XIA_NODES_MAX);
		num_src_nodes = XIA_NODES_MAX;
	}

	/* Add XIP last node. */
	last_node = tvb_get_guint8(tvb, XIPH_LSTN);
	proto_tree_add_uint_format_value(xip_tree, hf_xip_last_node, tvb,
		XIPH_LSTN, 1, last_node, "%d%s", last_node,
		last_node == XIA_ENTRY_NODE_INDEX ? " (entry node)" : "");

	/* Construct Destination DAG subtree. */
	if (num_dst_nodes > 0)
		construct_dag(tvb, pinfo, xip_tree, ett_xip_ddag,
			hf_xip_dst_dag, hf_xip_dst_dag_entry,
			num_dst_nodes, XIPH_DSTD);

	/* Construct Source DAG subtree. */
	if (num_src_nodes > 0)
		construct_dag(tvb, pinfo, xip_tree, ett_xip_sdag,
			hf_xip_src_dag, hf_xip_src_dag_entry,
			num_src_nodes,
			XIPH_DSTD + num_dst_nodes * XIA_NODE_SIZE);

	/* First byte after XIP header. */
	offset = XIPH_DSTD + XIA_NODE_SIZE * (num_dst_nodes + num_src_nodes);

	/* Dissect other headers according to the sink node, if needed. */
	offset += dissect_xip_sink_node(tvb, pinfo, tree, offset,
			tvb_get_ntohl(tvb, XIPH_DSTD +
			(num_dst_nodes - 1) * XIA_NODE_SIZE));

	dissect_xip_next_header(tvb, pinfo, tree, next_ti, offset);
}

static gint
dissect_xip(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
	void *data _U_)
{
	/* Not large enough to be a valid XIP packet. */
	if (tvb_reported_length(tvb) < XIPH_MIN_LEN)
		return 0;

	col_set_str(pinfo->cinfo, COL_PROTOCOL, "XIP");
	col_set_str(pinfo->cinfo, COL_INFO, "XIP Packet");

	display_xip(tvb, pinfo, tree);
	return tvb_captured_length(tvb);
}

void
proto_register_xip(void)
{
	static hf_register_info hf[] = {

		/* XIP Header. */

		{ &hf_xip_version,
		{ "Version", "xip.version", FT_UINT8,
		   BASE_DEC, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_next_hdr,
		{ "Next Header", "xip.next_hdr", FT_UINT8,
		   BASE_DEC, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_payload_len,
		{ "Payload Length", "xip.payload_len", FT_UINT16,
		   BASE_DEC, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_hop_limit,
		{ "Hop Limit", "xip.hop_limit", FT_UINT8,
		   BASE_DEC, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_num_dst,
		{ "Number of Destination Nodes", "xip.num_dst", FT_UINT8,
		   BASE_DEC, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_num_src,
		{ "Number of Source Nodes", "xip.num_src", FT_UINT8,
		   BASE_DEC, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_last_node,
		{ "Last Node", "xip.last_node", FT_UINT8,
		   BASE_DEC, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_dst_dag,
		{ "Destination DAG", "xip.dst_dag", FT_NONE,
		   BASE_NONE, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_dst_dag_entry,
		{ "Destination DAG Entry", "xip.dst_dag_entry", FT_STRING,
		   BASE_NONE, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_src_dag,
		{ "Source DAG", "xip.src_dag", FT_NONE,
		   BASE_NONE, NULL, 0x0, NULL, HFILL }},

		{ &hf_xip_src_dag_entry,
		{ "Source DAG Entry", "xip.src_dag_entry", FT_STRING,
		   BASE_NONE, NULL, 0x0, NULL, HFILL }}
	};

	static gint *ett[] = {
		&ett_xip_tree,
		&ett_xip_ddag,
		&ett_xip_sdag
	};

	static ei_register_info ei[] = {
		{ &ei_xip_invalid_len,
		{ "xip.invalid.len", PI_MALFORMED, PI_ERROR,
		  "Invalid length", EXPFILL }},

		{ &ei_xip_next_header,
		{ "xip.next.header", PI_MALFORMED, PI_ERROR,
		  "Invalid next header", EXPFILL }},

		{ &ei_xip_bad_num_dst,
		{ "xip.bad_num_dst", PI_MALFORMED, PI_ERROR,
		  "Invalid number of destination DAG nodes", EXPFILL }},

		{ &ei_xip_bad_num_src,
		{ "xip.bad_num_src", PI_MALFORMED, PI_ERROR,
		  "Invalid number of source DAG nodes", EXPFILL }}
	};

	expert_module_t* expert_xip;

	proto_xip = proto_register_protocol(
		"eXpressive Internet Protocol",
		"XIP",
		"xip");

	xip_handle = register_dissector("xip", dissect_xip, proto_xip);
	proto_register_field_array(proto_xip, hf, array_length(hf));
	proto_register_subtree_array(ett, array_length(ett));

	expert_xip = expert_register_protocol(proto_xip);
	expert_register_field_array(expert_xip, ei, array_length(ei));
}

void
proto_reg_handoff_xip(void)
{
	dissector_add_uint("ethertype", ETHERTYPE_XIP, xip_handle);

	xip_serval_handle = find_dissector_add_dependency("xipserval", proto_xip);
}

/*
 * Editor modelines  -  https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * indent-tabs-mode: t
 * End:
 *
 * vi: set shiftwidth=8 tabstop=8 noexpandtab:
 * :indentSize=8:tabSize=8:noTabs=false:
 */