/*	$OpenBSD: rde.h,v 1.312 2025/01/09 12:16:21 claudio Exp $ */

/*
 * Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org> and
 *                          Andre Oppermann <oppermann@networx.ch>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __RDE_H__
#define __RDE_H__

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/tree.h>
#include <stdint.h>
#include <stddef.h>

#include "bgpd.h"
#include "log.h"

/* rde internal structures */

enum peer_state {
	PEER_NONE,
	PEER_DOWN,
	PEER_UP,
	PEER_ERR	/* error occurred going to PEER_DOWN state */
};

LIST_HEAD(prefix_list, prefix);
TAILQ_HEAD(prefix_queue, prefix);
RB_HEAD(rib_tree, rib_entry);

struct rib_entry {
	RB_ENTRY(rib_entry)	 rib_e;
	struct prefix_queue	 prefix_h;
	struct pt_entry		*prefix;
	uint16_t		 rib_id;
	uint16_t		 lock;
};

struct rib {
	struct rib_tree		tree;
	char			name[PEER_DESCR_LEN];
	struct filter_head	*in_rules;
	struct filter_head	*in_rules_tmp;
	u_int			rtableid;
	u_int			rtableid_tmp;
	enum reconf_action	state, fibstate;
	uint16_t		id;
	uint16_t		flags;
	uint16_t		flags_tmp;
};

#define RIB_ADJ_IN	0
#define RIB_LOC_START	1
#define RIB_NOTFOUND	0xffff
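/*
 * Illustrative note (added for clarity, not in the original header):
 * RIB_ADJ_IN is the fixed id of the Adj-RIB-In and locally configured
 * RIBs start at RIB_LOC_START; RIB_NOTFOUND serves as the "no such RIB"
 * sentinel (returned e.g. by rib_find() declared below).
 */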

/*
 * Peers are identified between the session engine and the RDE by a
 * unique 32-bit peer id (see peer_get() and peer_add() below).
 */
RB_HEAD(peer_tree, rde_peer);
RB_HEAD(prefix_tree, prefix);
RB_HEAD(prefix_index, prefix);
struct iq;

struct rde_peer {
	RB_ENTRY(rde_peer)		 entry;
	SIMPLEQ_HEAD(, iq)		 imsg_queue;
	struct peer_config		 conf;
	struct rde_peer_stats		 stats;
	struct bgpd_addr		 remote_addr;
	struct bgpd_addr		 local_v4_addr;
	struct bgpd_addr		 local_v6_addr;
	struct capabilities		 capa;
	struct addpath_eval		 eval;
	struct prefix_index		 adj_rib_out;
	struct prefix_tree		 updates[AID_MAX];
	struct prefix_tree		 withdraws[AID_MAX];
	struct filter_head		*out_rules;
	time_t				 staletime[AID_MAX];
	uint32_t			 remote_bgpid;
	uint32_t			 path_id_tx;
	unsigned int			 local_if_scope;
	enum peer_state			 state;
	enum export_type		 export_type;
	enum role			 role;
	uint16_t			 loc_rib_id;
	uint16_t			 short_as;
	uint16_t			 mrt_idx;
	uint8_t				 recv_eor;	/* bitfield per AID */
	uint8_t				 sent_eor;	/* bitfield per AID */
	uint8_t				 reconf_out;	/* out filter changed */
	uint8_t				 reconf_rib;	/* rib changed */
	uint8_t				 throttled;
	uint8_t				 flags;
};
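
/*
 * A minimal sketch (added for clarity, not part of the original header):
 * recv_eor and sent_eor are per-AID bitfields, so recording and testing
 * an End-of-RIB marker for an address family would look roughly like:
 *
 *	peer->recv_eor |= 1 << aid;
 *	if (peer->recv_eor & (1 << aid))
 *		... EoR already seen for this AID ...
 */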

struct rde_aspa;
struct rde_aspa_state {
	uint8_t		onlyup;
	uint8_t		downup;
};

#define AS_SET			1
#define AS_SEQUENCE		2
#define AS_CONFED_SEQUENCE	3
#define AS_CONFED_SET		4
#define ASPATH_HEADER_SIZE	(offsetof(struct aspath, data))

struct aspath {
	uint32_t		source_as;	/* cached source_as */
	uint16_t		len;	/* total length of aspath in octets */
	uint16_t		ascnt;	/* number of AS hops in data */
	u_char			data[1]; /* placeholder for actual data */
};
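
/*
 * Note added for clarity (not in the original header): aspath objects
 * are variable sized; data[1] is only a placeholder, so an instance
 * carrying the raw wire encoding is expected to occupy
 * ASPATH_HEADER_SIZE + len bytes.
 */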

enum attrtypes {
	ATTR_UNDEF,
	ATTR_ORIGIN,
	ATTR_ASPATH,
	ATTR_NEXTHOP,
	ATTR_MED,
	ATTR_LOCALPREF,
	ATTR_ATOMIC_AGGREGATE,
	ATTR_AGGREGATOR,
	ATTR_COMMUNITIES,
	ATTR_ORIGINATOR_ID,
	ATTR_CLUSTER_LIST,
	ATTR_MP_REACH_NLRI=14,
	ATTR_MP_UNREACH_NLRI=15,
	ATTR_EXT_COMMUNITIES=16,
	ATTR_AS4_PATH=17,
	ATTR_AS4_AGGREGATOR=18,
	ATTR_PMSI_TUNNEL=22,
	ATTR_LARGE_COMMUNITIES=32,
	ATTR_OTC=35,
	ATTR_FIRST_UNKNOWN,	/* after this all attributes are unknown */
};

/* attribute flags. 4 low order bits reserved */
#define	ATTR_EXTLEN		0x10
#define ATTR_PARTIAL		0x20
#define ATTR_TRANSITIVE		0x40
#define ATTR_OPTIONAL		0x80
#define ATTR_RESERVED		0x0f
/* by default mask the reserved bits and the ext len bit */
#define ATTR_DEFMASK		(ATTR_RESERVED | ATTR_EXTLEN)

/* default attribute flags for well-known attributes */
#define ATTR_WELL_KNOWN		ATTR_TRANSITIVE
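/*
 * Illustrative note (added, not in the original header): code comparing
 * or storing attribute flags typically masks them first, since the
 * reserved bits and the ATTR_EXTLEN encoding bit carry no semantic
 * meaning, e.g. (flags & ~ATTR_DEFMASK).
 */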

struct attr {
	RB_ENTRY(attr)			 entry;
	u_char				*data;
	int				 refcnt;
	uint16_t			 len;
	uint8_t				 flags;
	uint8_t				 type;
};

struct rde_community {
	RB_ENTRY(rde_community)		entry;
	int				size;
	int				nentries;
	int				flags;
	int				refcnt;
	struct community		*communities;
};

#define	PARTIAL_COMMUNITIES		0x01
#define	PARTIAL_LARGE_COMMUNITIES	0x02
#define	PARTIAL_EXT_COMMUNITIES		0x04

#define	F_ATTR_ORIGIN		0x00001
#define	F_ATTR_ASPATH		0x00002
#define	F_ATTR_NEXTHOP		0x00004
#define	F_ATTR_LOCALPREF	0x00008
#define	F_ATTR_MED		0x00010
#define	F_ATTR_MED_ANNOUNCE	0x00020
#define	F_ATTR_MP_REACH		0x00040
#define	F_ATTR_MP_UNREACH	0x00080
#define	F_ATTR_AS4BYTE_NEW	0x00100	/* AS4_PATH or AS4_AGGREGATOR */
#define	F_ATTR_LOOP		0x00200 /* path would cause a route loop */
#define	F_PREFIX_ANNOUNCED	0x00400
#define	F_ANN_DYNAMIC		0x00800
#define	F_ATTR_OTC		0x01000	/* OTC present */
#define	F_ATTR_OTC_LEAK		0x02000 /* otc leak, not eligible */
#define	F_ATTR_PARSE_ERR	0x10000 /* parse error, not eligible */
#define	F_ATTR_LINKED		0x20000 /* if set path is on various lists */

#define ORIGIN_IGP		0
#define ORIGIN_EGP		1
#define ORIGIN_INCOMPLETE	2

#define DEFAULT_LPREF		100

struct rde_aspath {
	RB_ENTRY(rde_aspath)		 entry;
	struct attr			**others;
	struct aspath			*aspath;
	struct rde_aspa_state		 aspa_state;
	int				 refcnt;
	uint32_t			 flags;		/* internally used */
	uint32_t			 med;		/* multi exit disc */
	uint32_t			 lpref;		/* local pref */
	uint32_t			 weight;	/* low prio lpref */
	uint16_t			 rtlabelid;	/* route label id */
	uint16_t			 pftableid;	/* pf table id */
	uint8_t				 origin;
	uint8_t				 others_len;
	uint8_t				 aspa_generation;
};

enum nexthop_state {
	NEXTHOP_LOOKUP,
	NEXTHOP_UNREACH,
	NEXTHOP_REACH,
	NEXTHOP_FLAPPED		/* only used by oldstate */
};

struct nexthop {
	RB_ENTRY(nexthop)	entry;
	TAILQ_ENTRY(nexthop)	runner_l;
	struct prefix_list	prefix_h;
	struct prefix		*next_prefix;
	struct bgpd_addr	exit_nexthop;
	struct bgpd_addr	true_nexthop;
	struct bgpd_addr	nexthop_net;
#if 0
	/*
	 * Currently the nexthop state is boolean; it could be replaced by a
	 * variable cost, with a maximum value meaning unreachable.
	 */
	uint32_t		costs;
#endif
	int			refcnt;
	enum nexthop_state	state;
	enum nexthop_state	oldstate;
	uint8_t			nexthop_netlen;
	uint8_t			flags;
#define NEXTHOP_CONNECTED	0x01
};

/* generic entry without address specific part */
struct pt_entry {
	RB_ENTRY(pt_entry)		 pt_e;
	uint8_t				 aid;
	uint8_t				 prefixlen;
	uint16_t			 len;
	uint32_t			 refcnt;
	uint8_t				 data[4]; /* data depending on aid */
};

struct prefix {
	union {
		struct {
			TAILQ_ENTRY(prefix)	 rib;
			LIST_ENTRY(prefix)	 nexthop;
			struct rib_entry	*re;
		} list;
		struct {
			RB_ENTRY(prefix)	 index, update;
		} tree;
	}				 entry;
	struct pt_entry			*pt;
	struct rde_aspath		*aspath;
	struct rde_community		*communities;
	struct rde_peer			*peer;
	struct nexthop			*nexthop;	/* may be NULL */
	time_t				 lastchange;
	uint32_t			 path_id;
	uint32_t			 path_id_tx;
	uint16_t			 flags;
	uint8_t				 validation_state;
	uint8_t				 nhflags;
	int8_t				 dmetric;	/* decision metric */
};
#define	PREFIX_FLAG_WITHDRAW	0x0001	/* enqueued on withdraw queue */
#define	PREFIX_FLAG_UPDATE	0x0002	/* enqueued on update queue */
#define	PREFIX_FLAG_DEAD	0x0004	/* locked but removed */
#define	PREFIX_FLAG_STALE	0x0008	/* stale entry (for addpath) */
#define	PREFIX_FLAG_MASK	0x000f	/* mask for the prefix types */
#define	PREFIX_FLAG_ADJOUT	0x0010	/* prefix is in the adj-out rib */
#define	PREFIX_FLAG_EOR		0x0020	/* prefix is EoR */
#define	PREFIX_NEXTHOP_LINKED	0x0040	/* prefix is linked onto nexthop list */
#define	PREFIX_FLAG_LOCKED	0x0080	/* locked by rib walker */
#define	PREFIX_FLAG_FILTERED	0x0100	/* prefix is filtered (ineligible) */

#define	PREFIX_DMETRIC_NONE	0
#define	PREFIX_DMETRIC_INVALID	1
#define	PREFIX_DMETRIC_VALID	2
#define	PREFIX_DMETRIC_AS_WIDE	3
#define	PREFIX_DMETRIC_ECMP	4
#define	PREFIX_DMETRIC_BEST	5
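/*
 * Interpretive note (added, not in the original header): the decision
 * metrics above are ordered by increasing preference; PREFIX_DMETRIC_BEST
 * marks the prefix selected by the decision process and
 * PREFIX_DMETRIC_ECMP an equal-cost alternative to it.
 */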

/* possible states for nhflags */
#define	NEXTHOP_SELF		0x01
#define	NEXTHOP_REJECT		0x02
#define	NEXTHOP_BLACKHOLE	0x04
#define	NEXTHOP_NOMODIFY	0x08
#define	NEXTHOP_MASK		0x0f
#define	NEXTHOP_VALID		0x80

struct filterstate {
	struct rde_aspath	 aspath;
	struct rde_community	 communities;
	struct nexthop		*nexthop;
	uint8_t			 nhflags;
	uint8_t			 vstate;
};

enum eval_mode {
	EVAL_DEFAULT,
	EVAL_ALL,
	EVAL_RECONF,
};

extern struct rde_memstats rdemem;

/* prototypes */
/* mrt.c */
int		mrt_dump_v2_hdr(struct mrt *, struct bgpd_config *);
void		mrt_dump_upcall(struct rib_entry *, void *);

/* rde.c */
void		 rde_update_err(struct rde_peer *, uint8_t, uint8_t,
		    struct ibuf *);
void		 rde_update_log(const char *, uint16_t,
		    const struct rde_peer *, const struct bgpd_addr *,
		    const struct bgpd_addr *, uint8_t);
void		rde_send_kroute_flush(struct rib *);
void		rde_send_kroute(struct rib *, struct prefix *, struct prefix *);
void		rde_send_nexthop(struct bgpd_addr *, int);
void		rde_pftable_add(uint16_t, struct prefix *);
void		rde_pftable_del(uint16_t, struct prefix *);

int		rde_evaluate_all(void);
uint32_t	rde_local_as(void);
int		rde_decisionflags(void);
void		rde_peer_send_rrefresh(struct rde_peer *, uint8_t, uint8_t);
int		rde_match_peer(struct rde_peer *, struct ctl_neighbor *);

/* rde_peer.c */
int		 peer_has_as4byte(struct rde_peer *);
int		 peer_has_add_path(struct rde_peer *, uint8_t, int);
int		 peer_has_ext_msg(struct rde_peer *);
int		 peer_has_ext_nexthop(struct rde_peer *, uint8_t);
int		 peer_accept_no_as_set(struct rde_peer *);
void		 peer_init(struct filter_head *);
void		 peer_shutdown(void);
void		 peer_foreach(void (*)(struct rde_peer *, void *), void *);
struct rde_peer	*peer_get(uint32_t);
struct rde_peer *peer_match(struct ctl_neighbor *, uint32_t);
struct rde_peer	*peer_add(uint32_t, struct peer_config *, struct filter_head *);
struct filter_head	*peer_apply_out_filter(struct rde_peer *,
			    struct filter_head *);

void		 rde_generate_updates(struct rib_entry *, struct prefix *,
		    struct prefix *, enum eval_mode);

void		 peer_up(struct rde_peer *, struct session_up *);
void		 peer_down(struct rde_peer *);
void		 peer_delete(struct rde_peer *);
void		 peer_flush(struct rde_peer *, uint8_t, time_t);
void		 peer_stale(struct rde_peer *, uint8_t, int);
void		 peer_blast(struct rde_peer *, uint8_t);
void		 peer_dump(struct rde_peer *, uint8_t);
void		 peer_begin_rrefresh(struct rde_peer *, uint8_t);
int		 peer_work_pending(void);
void		 peer_reaper(struct rde_peer *);

void		 peer_imsg_push(struct rde_peer *, struct imsg *);
int		 peer_imsg_pop(struct rde_peer *, struct imsg *);
void		 peer_imsg_flush(struct rde_peer *);

static inline int
peer_is_up(struct rde_peer *peer)
{
	return (peer->state == PEER_UP);
}

RB_PROTOTYPE(peer_tree, rde_peer, entry, peer_cmp);

/* rde_attr.c */
int		 attr_writebuf(struct ibuf *, uint8_t, uint8_t, void *,
		    uint16_t);
void		 attr_shutdown(void);
int		 attr_optadd(struct rde_aspath *, uint8_t, uint8_t,
		    void *, uint16_t);
struct attr	*attr_optget(const struct rde_aspath *, uint8_t);
void		 attr_copy(struct rde_aspath *, const struct rde_aspath *);
int		 attr_compare(struct rde_aspath *, struct rde_aspath *);
void		 attr_freeall(struct rde_aspath *);
void		 attr_free(struct rde_aspath *, struct attr *);

struct aspath	*aspath_get(void *, uint16_t);
struct aspath	*aspath_copy(struct aspath *);
void		 aspath_put(struct aspath *);
u_char		*aspath_deflate(u_char *, uint16_t *, int *);
void		 aspath_merge(struct rde_aspath *, struct attr *);
uint32_t	 aspath_neighbor(struct aspath *);
int		 aspath_loopfree(struct aspath *, uint32_t);
int		 aspath_compare(struct aspath *, struct aspath *);
int		 aspath_match(struct aspath *, struct filter_as *, uint32_t);
u_char		*aspath_prepend(struct aspath *, uint32_t, int, uint16_t *);
u_char		*aspath_override(struct aspath *, uint32_t, uint32_t,
		    uint16_t *);
int		 aspath_lenmatch(struct aspath *, enum aslen_spec, u_int);

static inline u_char *
aspath_dump(struct aspath *aspath)
{
	return (aspath->data);
}

static inline uint16_t
aspath_length(struct aspath *aspath)
{
	return (aspath->len);
}

static inline uint32_t
aspath_origin(struct aspath *aspath)
{
	return (aspath->source_as);
}

/* rde_community.c */
int	community_match(struct rde_community *, struct community *,
	    struct rde_peer *);
int	community_count(struct rde_community *, uint8_t type);
int	community_set(struct rde_community *, struct community *,
	    struct rde_peer *);
void	community_delete(struct rde_community *, struct community *,
	    struct rde_peer *);

int	community_add(struct rde_community *, int, struct ibuf *);
int	community_large_add(struct rde_community *, int, struct ibuf *);
int	community_ext_add(struct rde_community *, int, int, struct ibuf *);
int	community_writebuf(struct rde_community *, uint8_t, int, struct ibuf *);

void			 communities_shutdown(void);
struct rde_community	*communities_lookup(struct rde_community *);
struct rde_community	*communities_link(struct rde_community *);
void			 communities_unlink(struct rde_community *);

int	 communities_equal(struct rde_community *, struct rde_community *);
void	 communities_copy(struct rde_community *, struct rde_community *);
void	 communities_clean(struct rde_community *);

static inline struct rde_community *
communities_ref(struct rde_community *comm)
{
	if (comm->refcnt == 0)
		fatalx("%s: not-referenced community", __func__);
	comm->refcnt++;
	rdemem.comm_refs++;
	return comm;
}

static inline void
communities_unref(struct rde_community *comm)
{
	if (comm == NULL)
		return;
	rdemem.comm_refs--;
	if (--comm->refcnt == 1)	/* last ref is held internally */
		communities_unlink(comm);
}

int	community_to_rd(struct community *, uint64_t *);

/* rde_decide.c */
int		 prefix_eligible(struct prefix *);
struct prefix	*prefix_best(struct rib_entry *);
void		 prefix_evaluate(struct rib_entry *, struct prefix *,
		    struct prefix *);
void		 prefix_evaluate_nexthop(struct prefix *, enum nexthop_state,
		    enum nexthop_state);

/* rde_filter.c */
void	rde_apply_set(struct filter_set_head *, struct rde_peer *,
	    struct rde_peer *, struct filterstate *, u_int8_t);
void	rde_filterstate_init(struct filterstate *);
void	rde_filterstate_prep(struct filterstate *, struct prefix *);
void	rde_filterstate_copy(struct filterstate *, struct filterstate *);
void	rde_filterstate_set_vstate(struct filterstate *, uint8_t, uint8_t);
void	rde_filterstate_clean(struct filterstate *);
int	rde_filter_skip_rule(struct rde_peer *, struct filter_rule *);
int	rde_filter_equal(struct filter_head *, struct filter_head *);
void	rde_filter_calc_skip_steps(struct filter_head *);
enum filter_actions rde_filter(struct filter_head *, struct rde_peer *,
	    struct rde_peer *, struct bgpd_addr *, uint8_t,
	    struct filterstate *);

/* rde_prefix.c */
void	 pt_init(void);
void	 pt_shutdown(void);
void	 pt_getaddr(struct pt_entry *, struct bgpd_addr *);
int	 pt_getflowspec(struct pt_entry *, uint8_t **);
struct pt_entry	*pt_fill(struct bgpd_addr *, int);
struct pt_entry	*pt_get(struct bgpd_addr *, int);
struct pt_entry *pt_add(struct bgpd_addr *, int);
struct pt_entry	*pt_get_flow(struct flowspec *);
struct pt_entry	*pt_add_flow(struct flowspec *);
void	 pt_remove(struct pt_entry *);
struct pt_entry	*pt_lookup(struct bgpd_addr *);
int	 pt_prefix_cmp(const struct pt_entry *, const struct pt_entry *);
int	 pt_writebuf(struct ibuf *, struct pt_entry *, int, int, uint32_t);

static inline struct pt_entry *
pt_ref(struct pt_entry *pt)
{
	++pt->refcnt;
	if (pt->refcnt == 0)
		fatalx("pt_ref: overflow");
	return pt;
}

static inline void
pt_unref(struct pt_entry *pt)
{
	if (pt->refcnt == 0)
		fatalx("pt_unref: underflow");
	if (--pt->refcnt == 0)
		pt_remove(pt);
}

/* rde_rib.c */
extern uint16_t	rib_size;

struct rib	*rib_new(char *, u_int, uint16_t);
int		 rib_update(struct rib *);
struct rib	*rib_byid(uint16_t);
uint16_t	 rib_find(char *);
void		 rib_free(struct rib *);
void		 rib_shutdown(void);
struct rib_entry *rib_get(struct rib *, struct pt_entry *);
struct rib_entry *rib_get_addr(struct rib *, struct bgpd_addr *, int);
struct rib_entry *rib_match(struct rib *, struct bgpd_addr *);
int		 rib_dump_pending(void);
void		 rib_dump_runner(void);
int		 rib_dump_new(uint16_t, uint8_t, unsigned int, void *,
		    void (*)(struct rib_entry *, void *),
		    void (*)(void *, uint8_t),
		    int (*)(void *));
int		 rib_dump_subtree(uint16_t, struct bgpd_addr *, uint8_t,
		    unsigned int count, void *arg,
		    void (*)(struct rib_entry *, void *),
		    void (*)(void *, uint8_t),
		    int (*)(void *));
void		 rib_dump_terminate(void *);

extern struct rib flowrib;

static inline struct rib *
re_rib(struct rib_entry *re)
{
	if (re->prefix->aid == AID_FLOWSPECv4 ||
	    re->prefix->aid == AID_FLOWSPECv6)
		return &flowrib;
	return rib_byid(re->rib_id);
}

void		 path_shutdown(void);
struct rde_aspath *path_copy(struct rde_aspath *, const struct rde_aspath *);
struct rde_aspath *path_prep(struct rde_aspath *);
struct rde_aspath *path_get(void);
void		 path_clean(struct rde_aspath *);
void		 path_put(struct rde_aspath *);

#define	PREFIX_SIZE(x)	(((x) + 7) / 8 + 1)
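/*
 * Added example (not in the original header): PREFIX_SIZE(x) is the size
 * of the NLRI wire encoding of a prefix of x bits, i.e. one length octet
 * plus the minimal number of address octets:
 * PREFIX_SIZE(0) == 1, PREFIX_SIZE(24) == 4, PREFIX_SIZE(32) == 5.
 */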
struct prefix	*prefix_get(struct rib *, struct rde_peer *, uint32_t,
		    struct bgpd_addr *, int);
struct prefix	*prefix_adjout_get(struct rde_peer *, uint32_t,
		    struct pt_entry *);
struct prefix	*prefix_adjout_first(struct rde_peer *, struct pt_entry *);
struct prefix	*prefix_adjout_next(struct rde_peer *, struct prefix *);
struct prefix	*prefix_adjout_lookup(struct rde_peer *, struct bgpd_addr *,
		    int);
struct prefix	*prefix_adjout_match(struct rde_peer *, struct bgpd_addr *);
int		 prefix_update(struct rib *, struct rde_peer *, uint32_t,
		    uint32_t, struct filterstate *, int, struct bgpd_addr *,
		    int);
int		 prefix_withdraw(struct rib *, struct rde_peer *, uint32_t,
		    struct bgpd_addr *, int);
int		 prefix_flowspec_update(struct rde_peer *, struct filterstate *,
		    struct pt_entry *, uint32_t);
int		 prefix_flowspec_withdraw(struct rde_peer *, struct pt_entry *);
void		 prefix_flowspec_dump(uint8_t, void *,
		    void (*)(struct rib_entry *, void *),
		    void (*)(void *, uint8_t));
void		 prefix_add_eor(struct rde_peer *, uint8_t);
void		 prefix_adjout_update(struct prefix *, struct rde_peer *,
		    struct filterstate *, struct pt_entry *, uint32_t);
void		 prefix_adjout_withdraw(struct prefix *);
void		 prefix_adjout_destroy(struct prefix *);
void		 prefix_adjout_flush_pending(struct rde_peer *);
int		 prefix_adjout_reaper(struct rde_peer *);
int		 prefix_dump_new(struct rde_peer *, uint8_t, unsigned int,
		    void *, void (*)(struct prefix *, void *),
		    void (*)(void *, uint8_t), int (*)(void *));
int		 prefix_dump_subtree(struct rde_peer *, struct bgpd_addr *,
		    uint8_t, unsigned int, void *,
		    void (*)(struct prefix *, void *),
		    void (*)(void *, uint8_t), int (*)(void *));
struct prefix	*prefix_bypeer(struct rib_entry *, struct rde_peer *,
		    uint32_t);
void		 prefix_destroy(struct prefix *);

RB_PROTOTYPE(prefix_tree, prefix, entry, prefix_cmp)

static inline struct rde_peer *
prefix_peer(struct prefix *p)
{
	return (p->peer);
}

static inline struct rde_aspath *
prefix_aspath(struct prefix *p)
{
	return (p->aspath);
}

static inline struct rde_community *
prefix_communities(struct prefix *p)
{
	return (p->communities);
}

static inline struct nexthop *
prefix_nexthop(struct prefix *p)
{
	return (p->nexthop);
}

static inline uint8_t
prefix_nhflags(struct prefix *p)
{
	return (p->nhflags & NEXTHOP_MASK);
}

static inline int
prefix_nhvalid(struct prefix *p)
{
	return ((p->nhflags & NEXTHOP_VALID) != 0);
}

static inline uint8_t
prefix_roa_vstate(struct prefix *p)
{
	return (p->validation_state & ROA_MASK);
}

static inline uint8_t
prefix_aspa_vstate(struct prefix *p)
{
	return (p->validation_state >> 4);
}

static inline void
prefix_set_vstate(struct prefix *p, uint8_t roa_vstate, uint8_t aspa_vstate)
{
	p->validation_state = roa_vstate & ROA_MASK;
	p->validation_state |= aspa_vstate << 4;
}

static inline struct rib_entry *
prefix_re(struct prefix *p)
{
	if (p->flags & PREFIX_FLAG_ADJOUT)
		return NULL;
	return (p->entry.list.re);
}

static inline int
prefix_filtered(struct prefix *p)
{
	return ((p->flags & PREFIX_FLAG_FILTERED) != 0);
}

void		 nexthop_shutdown(void);
int		 nexthop_pending(void);
void		 nexthop_runner(void);
void		 nexthop_modify(struct nexthop *, enum action_types, uint8_t,
		    struct nexthop **, uint8_t *);
void		 nexthop_link(struct prefix *);
void		 nexthop_unlink(struct prefix *);
void		 nexthop_update(struct kroute_nexthop *);
struct nexthop	*nexthop_get(struct bgpd_addr *);
struct nexthop	*nexthop_ref(struct nexthop *);
int		 nexthop_unref(struct nexthop *);

/* rde_update.c */
void		 up_generate_updates(struct rde_peer *, struct rib_entry *);
void		 up_generate_addpath(struct rde_peer *, struct rib_entry *);
void		 up_generate_addpath_all(struct rde_peer *, struct rib_entry *,
		    struct prefix *, struct prefix *);
void		 up_generate_default(struct rde_peer *, uint8_t);
int		 up_is_eor(struct rde_peer *, uint8_t);
struct ibuf	*up_dump_withdraws(struct rde_peer *, uint8_t);
struct ibuf	*up_dump_update(struct rde_peer *, uint8_t);

/* rde_aspa.c */
void		 aspa_validation(struct rde_aspa *, struct aspath *,
		    struct rde_aspa_state *);
struct rde_aspa	*aspa_table_prep(uint32_t, size_t);
void		 aspa_add_set(struct rde_aspa *, uint32_t, const uint32_t *,
		    uint32_t);
void		 aspa_table_free(struct rde_aspa *);
void		 aspa_table_stats(const struct rde_aspa *,
		    struct ctl_show_set *);
int		 aspa_table_equal(const struct rde_aspa *,
		    const struct rde_aspa *);
void		 aspa_table_unchanged(struct rde_aspa *,
		    const struct rde_aspa *);

#endif /* __RDE_H__ */