1 /* $OpenBSD: rde.h,v 1.303 2024/05/29 10:36:32 claudio Exp $ */
2
3 /*
4 * Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org> and
5 * Andre Oppermann <oppermann@networx.ch>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19 #ifndef __RDE_H__
20 #define __RDE_H__
21
22 #include <sys/types.h>
23 #include <sys/queue.h>
24 #include <sys/tree.h>
25 #include <stdint.h>
26 #include <stddef.h>
27
28 #include "bgpd.h"
29 #include "log.h"
30
31 /* rde internal structures */
32
/* RDE-side lifecycle state of a peer. */
enum peer_state {
	PEER_NONE,	/* slot unused / peer not yet initialized */
	PEER_DOWN,	/* peer session not established */
	PEER_UP,	/* peer session established */
	PEER_ERR	/* error occurred going to PEER_DOWN state */
};
39
40 LIST_HEAD(prefix_list, prefix);
41 TAILQ_HEAD(prefix_queue, prefix);
42 RB_HEAD(rib_tree, rib_entry);
43
/*
 * One destination in a RIB: the prefix plus the queue of all paths
 * (struct prefix) known for it.
 */
struct rib_entry {
	RB_ENTRY(rib_entry)	rib_e;		/* link into the rib's rib_tree */
	struct prefix_queue	prefix_h;	/* paths received for this prefix */
	struct pt_entry		*prefix;	/* shared prefix storage, see pt_*() */
	uint16_t		 rib_id;	/* id of owning rib, see rib_byid() */
	uint16_t		 lock;		/* lock count; presumably held by rib walkers — see rib_dump_*() */
};
51
/*
 * A Routing Information Base. The *_tmp members stage new values during
 * reconfiguration (see the reconf_action states) before being committed.
 */
struct rib {
	struct rib_tree		tree;		/* all rib_entries of this rib */
	char			name[PEER_DESCR_LEN];
	struct filter_head	*in_rules;	/* active input filter rules */
	struct filter_head	*in_rules_tmp;	/* staged rules during reconfig */
	u_int			rtableid;	/* kernel routing table id */
	u_int			rtableid_tmp;	/* staged rtableid during reconfig */
	enum reconf_action	state, fibstate;
	uint16_t		id;		/* index usable with rib_byid() */
	uint16_t		flags;
	uint16_t		flags_tmp;	/* staged flags during reconfig */
};
64
65 #define RIB_ADJ_IN 0
66 #define RIB_LOC_START 1
67 #define RIB_NOTFOUND 0xffff
68
69 /*
70 * How do we identify peers between the session handler and the rde?
71 * Currently I assume that we can do that with the neighbor_ip...
72 */
73 RB_HEAD(peer_tree, rde_peer);
74 RB_HEAD(prefix_tree, prefix);
75 RB_HEAD(prefix_index, prefix);
76 struct iq;
77
/*
 * Per-peer state kept by the RDE. Peers live in a global RB tree
 * (peer_tree) and carry their own Adj-RIB-Out plus per-AID queues of
 * pending updates and withdraws.
 */
struct rde_peer {
	RB_ENTRY(rde_peer)		 entry;		/* link in global peer_tree */
	SIMPLEQ_HEAD(, iq)		 imsg_queue;	/* queued imsgs, see peer_imsg_*() */
	struct peer_config		 conf;
	struct rde_peer_stats		 stats;
	struct bgpd_addr		 remote_addr;
	struct bgpd_addr		 local_v4_addr;
	struct bgpd_addr		 local_v6_addr;
	struct capabilities		 capa;		/* negotiated capabilities */
	struct addpath_eval		 eval;
	struct prefix_index		 adj_rib_out;	/* this peer's Adj-RIB-Out */
	struct prefix_tree		 updates[AID_MAX];	/* pending updates per AID */
	struct prefix_tree		 withdraws[AID_MAX];	/* pending withdraws per AID */
	struct filter_head		*out_rules;	/* output filter rules */
	time_t				 staletime[AID_MAX];	/* per-AID stale marker, see peer_stale() */
	uint32_t			 remote_bgpid;
	uint32_t			 path_id_tx;	/* path-id used on transmit (add-path) */
	unsigned int			 local_if_scope;	/* presumably IPv6 link-local scope id — confirm */
	enum peer_state			 state;
	enum export_type		 export_type;
	enum role			 role;		/* peer role (OTC handling, see ATTR_OTC) */
	uint16_t			 loc_rib_id;	/* id of the Loc-RIB this peer feeds */
	uint16_t			 short_as;	/* 2-byte AS number representation */
	uint16_t			 mrt_idx;
	uint8_t				 recv_eor;	/* bitfield per AID */
	uint8_t				 sent_eor;	/* bitfield per AID */
	uint8_t				 reconf_out;	/* out filter changed */
	uint8_t				 reconf_rib;	/* rib changed */
	uint8_t				 throttled;
	uint8_t				 flags;
};
109
struct rde_aspa;	/* opaque ASPA table, managed in rde_aspa.c */
/* Cached ASPA validation result, filled in by aspa_validation(). */
struct rde_aspa_state {
	uint8_t		onlyup;
	uint8_t		downup;
};
115
116 #define AS_SET 1
117 #define AS_SEQUENCE 2
118 #define AS_CONFED_SEQUENCE 3
119 #define AS_CONFED_SET 4
120 #define ASPATH_HEADER_SIZE (offsetof(struct aspath, data))
121
/*
 * Cached wire-format AS path. Allocations extend past 'data'; the fixed
 * header size is ASPATH_HEADER_SIZE (offsetof of 'data' above).
 */
struct aspath {
	uint32_t		source_as;	/* cached source_as */
	uint16_t		len;	/* total length of aspath in octets */
	uint16_t		ascnt;	/* number of AS hops in data */
	u_char			data[1];	/* placeholder for actual data */
};
128
/*
 * BGP path attribute type codes (values match the IANA "BGP Path
 * Attributes" registry; numeric gaps are codes not handled here).
 */
enum attrtypes {
	ATTR_UNDEF,
	ATTR_ORIGIN,
	ATTR_ASPATH,
	ATTR_NEXTHOP,
	ATTR_MED,
	ATTR_LOCALPREF,
	ATTR_ATOMIC_AGGREGATE,
	ATTR_AGGREGATOR,
	ATTR_COMMUNITIES,
	ATTR_ORIGINATOR_ID,
	ATTR_CLUSTER_LIST,
	ATTR_MP_REACH_NLRI=14,
	ATTR_MP_UNREACH_NLRI=15,
	ATTR_EXT_COMMUNITIES=16,
	ATTR_AS4_PATH=17,
	ATTR_AS4_AGGREGATOR=18,
	ATTR_LARGE_COMMUNITIES=32,
	ATTR_OTC=35,
	ATTR_FIRST_UNKNOWN,	/* after this all attributes are unknown */
};
150
151 /* attribute flags. 4 low order bits reserved */
152 #define ATTR_EXTLEN 0x10
153 #define ATTR_PARTIAL 0x20
154 #define ATTR_TRANSITIVE 0x40
155 #define ATTR_OPTIONAL 0x80
156 #define ATTR_RESERVED 0x0f
157 /* by default mask the reserved bits and the ext len bit */
158 #define ATTR_DEFMASK (ATTR_RESERVED | ATTR_EXTLEN)
159
160 /* default attribute flags for well known attributes */
161 #define ATTR_WELL_KNOWN ATTR_TRANSITIVE
162
/*
 * Generic (optional/unknown) path attribute stored in raw wire form;
 * reference counted and shared via the attr_* functions.
 */
struct attr {
	RB_ENTRY(attr)			 entry;
	u_char				*data;	/* raw attribute payload */
	int				 refcnt;
	uint16_t			 len;	/* length of data in octets */
	uint8_t				 flags;	/* ATTR_* flag bits, see above */
	uint8_t				 type;	/* enum attrtypes code */
};
171
/*
 * Shared, reference-counted set of communities attached to a path.
 * Looked up / interned via communities_lookup()/communities_link().
 */
struct rde_community {
	RB_ENTRY(rde_community)	entry;
	int			size;		/* allocated slots in 'communities' — presumably; verify in rde_community.c */
	int			nentries;	/* used entries in 'communities' */
	int			flags;		/* PARTIAL_* bits, see below */
	int			refcnt;		/* managed by communities_ref/_unref */
	struct community	*communities;
};
180
181 #define PARTIAL_COMMUNITIES 0x01
182 #define PARTIAL_LARGE_COMMUNITIES 0x02
183 #define PARTIAL_EXT_COMMUNITIES 0x04
184
185 #define F_ATTR_ORIGIN 0x00001
186 #define F_ATTR_ASPATH 0x00002
187 #define F_ATTR_NEXTHOP 0x00004
188 #define F_ATTR_LOCALPREF 0x00008
189 #define F_ATTR_MED 0x00010
190 #define F_ATTR_MED_ANNOUNCE 0x00020
191 #define F_ATTR_MP_REACH 0x00040
192 #define F_ATTR_MP_UNREACH 0x00080
193 #define F_ATTR_AS4BYTE_NEW 0x00100 /* AS4_PATH or AS4_AGGREGATOR */
194 #define F_ATTR_LOOP 0x00200 /* path would cause a route loop */
195 #define F_PREFIX_ANNOUNCED 0x00400
196 #define F_ANN_DYNAMIC 0x00800
197 #define F_ATTR_OTC 0x01000 /* OTC present */
198 #define F_ATTR_OTC_LEAK 0x02000 /* otc leak, not eligible */
199 #define F_ATTR_PARSE_ERR 0x10000 /* parse error, not eligible */
200 #define F_ATTR_LINKED 0x20000 /* if set path is on various lists */
201
202 #define ORIGIN_IGP 0
203 #define ORIGIN_EGP 1
204 #define ORIGIN_INCOMPLETE 2
205
206 #define DEFAULT_LPREF 100
207
/*
 * Shared, reference-counted collection of a path's attributes: the AS
 * path itself, well-known scalar attributes and any optional ones.
 */
struct rde_aspath {
	RB_ENTRY(rde_aspath)		 entry;
	struct attr			**others;	/* optional/unknown attrs, others_len slots */
	struct aspath			*aspath;
	struct rde_aspa_state		 aspa_state;	/* cached ASPA validation result */
	int				 refcnt;
	uint32_t			 flags;		/* internally used */
	uint32_t			 med;		/* multi exit disc */
	uint32_t			 lpref;		/* local pref */
	uint32_t			 weight;	/* low prio lpref */
	uint16_t			 rtlabelid;	/* route label id */
	uint16_t			 pftableid;	/* pf table id */
	uint8_t				 origin;	/* ORIGIN_IGP/EGP/INCOMPLETE */
	uint8_t				 others_len;
	uint8_t				 aspa_generation;	/* generation of aspa_state cache */
};
224
/* Reachability state of a nexthop as resolved via the kernel. */
enum nexthop_state {
	NEXTHOP_LOOKUP,		/* kernel lookup still pending */
	NEXTHOP_UNREACH,
	NEXTHOP_REACH,
	NEXTHOP_FLAPPED		/* only used by oldstate */
};
231
/*
 * Shared, reference-counted nexthop. Tracks the prefixes using it
 * (prefix_h, via nexthop_link()) so state changes can be re-evaluated.
 */
struct nexthop {
	RB_ENTRY(nexthop)	entry;
	TAILQ_ENTRY(nexthop)	runner_l;	/* queue for nexthop_runner() */
	struct prefix_list	prefix_h;	/* prefixes using this nexthop */
	struct prefix		*next_prefix;	/* runner position in prefix_h */
	struct bgpd_addr	exit_nexthop;
	struct bgpd_addr	true_nexthop;
	struct bgpd_addr	nexthop_net;
#if 0
	/*
	 * currently we use the boolean nexthop state, this could be exchanged
	 * with a variable cost with a max for unreachable.
	 */
	uint32_t		costs;
#endif
	int			refcnt;		/* managed by nexthop_ref/_unref */
	enum nexthop_state	state;
	enum nexthop_state	oldstate;
	uint8_t			nexthop_netlen;
	uint8_t			flags;
#define NEXTHOP_CONNECTED	0x01
};
254
255 /* generic entry without address specific part */
/* generic entry without address specific part */
struct pt_entry {
	RB_ENTRY(pt_entry)		 pt_e;
	uint8_t				 aid;
	uint8_t				 prefixlen;
	uint16_t			 len;		/* total size of this entry — presumably including aid data; confirm in rde_prefix.c */
	uint32_t			 refcnt;	/* managed by pt_ref/pt_unref */
	uint8_t				 data[4];	/* data depending on aid */
};
264
/*
 * One path for one prefix. The 'entry' union depends on where the
 * prefix lives: RIB prefixes use entry.list (rib queue + nexthop list),
 * Adj-RIB-Out prefixes (PREFIX_FLAG_ADJOUT set) use entry.tree.
 */
struct prefix {
	union {
		struct {
			TAILQ_ENTRY(prefix)	 rib;		/* link in rib_entry's prefix_h */
			LIST_ENTRY(prefix)	 nexthop;	/* link in nexthop's prefix_h */
			struct rib_entry	*re;		/* owning rib_entry */
		} list;
		struct {
			RB_ENTRY(prefix)	 index, update;	/* adj_rib_out / updates-withdraws trees */
		} tree;
	} entry;
	struct pt_entry			*pt;
	struct rde_aspath		*aspath;
	struct rde_community		*communities;
	struct rde_peer			*peer;
	struct nexthop			*nexthop;	/* may be NULL */
	time_t				 lastchange;
	uint32_t			 path_id;
	uint32_t			 path_id_tx;	/* path-id used towards peers */
	uint8_t				 validation_state;	/* ROA in low nibble, ASPA in high nibble */
	uint8_t				 nhflags;	/* NEXTHOP_* bits, see below */
	int8_t				 dmetric;	/* decision metric */
	uint8_t				 flags;
#define	PREFIX_FLAG_WITHDRAW	0x01	/* enqueued on withdraw queue */
#define	PREFIX_FLAG_UPDATE	0x02	/* enqueued on update queue */
#define	PREFIX_FLAG_DEAD	0x04	/* locked but removed */
#define	PREFIX_FLAG_STALE	0x08	/* stale entry (graceful reload) */
#define	PREFIX_FLAG_MASK	0x0f	/* mask for the prefix types */
#define	PREFIX_FLAG_ADJOUT	0x10	/* prefix is in the adj-out rib */
#define	PREFIX_FLAG_EOR		0x20	/* prefix is EoR */
#define	PREFIX_NEXTHOP_LINKED	0x40	/* prefix is linked onto nexthop list */
#define	PREFIX_FLAG_LOCKED	0x80	/* locked by rib walker */

#define	PREFIX_DMETRIC_NONE	0
#define	PREFIX_DMETRIC_INVALID	1
#define	PREFIX_DMETRIC_VALID	2
#define	PREFIX_DMETRIC_AS_WIDE	3
#define	PREFIX_DMETRIC_ECMP	4
#define	PREFIX_DMETRIC_BEST	5
};
305
306 /* possible states for nhflags */
307 #define NEXTHOP_SELF 0x01
308 #define NEXTHOP_REJECT 0x02
309 #define NEXTHOP_BLACKHOLE 0x04
310 #define NEXTHOP_NOMODIFY 0x08
311 #define NEXTHOP_MASK 0x0f
312 #define NEXTHOP_VALID 0x80
313
/*
 * Mutable working copy of a path's attributes used while filter rules
 * run (see rde_filterstate_* and rde_filter()).
 */
struct filterstate {
	struct rde_aspath	 aspath;
	struct rde_community	 communities;
	struct nexthop		*nexthop;
	uint8_t			 nhflags;	/* NEXTHOP_* bits */
	uint8_t			 vstate;	/* combined ROA/ASPA validation state */
};
321
/* Mode passed to rde_generate_updates() to control re-evaluation. */
enum eval_mode {
	EVAL_DEFAULT,
	EVAL_ALL,
	EVAL_RECONF,
};
327
328 extern struct rde_memstats rdemem;
329
330 /* prototypes */
331 /* mrt.c */
332 int mrt_dump_v2_hdr(struct mrt *, struct bgpd_config *);
333 void mrt_dump_upcall(struct rib_entry *, void *);
334
335 /* rde.c */
336 void rde_update_err(struct rde_peer *, uint8_t , uint8_t,
337 struct ibuf *);
338 void rde_update_log(const char *, uint16_t,
339 const struct rde_peer *, const struct bgpd_addr *,
340 const struct bgpd_addr *, uint8_t);
341 void rde_send_kroute_flush(struct rib *);
342 void rde_send_kroute(struct rib *, struct prefix *, struct prefix *);
343 void rde_send_nexthop(struct bgpd_addr *, int);
344 void rde_pftable_add(uint16_t, struct prefix *);
345 void rde_pftable_del(uint16_t, struct prefix *);
346
347 int rde_evaluate_all(void);
348 uint32_t rde_local_as(void);
349 int rde_decisionflags(void);
350 void rde_peer_send_rrefresh(struct rde_peer *, uint8_t, uint8_t);
351 int rde_match_peer(struct rde_peer *, struct ctl_neighbor *);
352
353 /* rde_peer.c */
354 int peer_has_as4byte(struct rde_peer *);
355 int peer_has_add_path(struct rde_peer *, uint8_t, int);
356 int peer_accept_no_as_set(struct rde_peer *);
357 void peer_init(struct filter_head *);
358 void peer_shutdown(void);
359 void peer_foreach(void (*)(struct rde_peer *, void *), void *);
360 struct rde_peer *peer_get(uint32_t);
361 struct rde_peer *peer_match(struct ctl_neighbor *, uint32_t);
362 struct rde_peer *peer_add(uint32_t, struct peer_config *, struct filter_head *);
363 struct filter_head *peer_apply_out_filter(struct rde_peer *,
364 struct filter_head *);
365
366 void rde_generate_updates(struct rib_entry *, struct prefix *,
367 struct prefix *, enum eval_mode);
368
369 void peer_up(struct rde_peer *, struct session_up *);
370 void peer_down(struct rde_peer *, void *);
371 void peer_flush(struct rde_peer *, uint8_t, time_t);
372 void peer_stale(struct rde_peer *, uint8_t, int);
373 void peer_dump(struct rde_peer *, uint8_t);
374 void peer_begin_rrefresh(struct rde_peer *, uint8_t);
375
376 void peer_imsg_push(struct rde_peer *, struct imsg *);
377 int peer_imsg_pop(struct rde_peer *, struct imsg *);
378 int peer_imsg_pending(void);
379 void peer_imsg_flush(struct rde_peer *);
380
381 RB_PROTOTYPE(peer_tree, rde_peer, entry, peer_cmp);
382
383 /* rde_attr.c */
384 int attr_writebuf(struct ibuf *, uint8_t, uint8_t, void *,
385 uint16_t);
386 void attr_shutdown(void);
387 int attr_optadd(struct rde_aspath *, uint8_t, uint8_t,
388 void *, uint16_t);
389 struct attr *attr_optget(const struct rde_aspath *, uint8_t);
390 void attr_copy(struct rde_aspath *, const struct rde_aspath *);
391 int attr_compare(struct rde_aspath *, struct rde_aspath *);
392 void attr_freeall(struct rde_aspath *);
393 void attr_free(struct rde_aspath *, struct attr *);
394
395 struct aspath *aspath_get(void *, uint16_t);
396 struct aspath *aspath_copy(struct aspath *);
397 void aspath_put(struct aspath *);
398 u_char *aspath_deflate(u_char *, uint16_t *, int *);
399 void aspath_merge(struct rde_aspath *, struct attr *);
400 uint32_t aspath_neighbor(struct aspath *);
401 int aspath_loopfree(struct aspath *, uint32_t);
402 int aspath_compare(struct aspath *, struct aspath *);
403 int aspath_match(struct aspath *, struct filter_as *, uint32_t);
404 u_char *aspath_prepend(struct aspath *, uint32_t, int, uint16_t *);
405 u_char *aspath_override(struct aspath *, uint32_t, uint32_t,
406 uint16_t *);
407 int aspath_lenmatch(struct aspath *, enum aslen_spec, u_int);
408
409 static inline u_char *
aspath_dump(struct aspath * aspath)410 aspath_dump(struct aspath *aspath)
411 {
412 return (aspath->data);
413 }
414
415 static inline uint16_t
aspath_length(struct aspath * aspath)416 aspath_length(struct aspath *aspath)
417 {
418 return (aspath->len);
419 }
420
421 static inline uint32_t
aspath_origin(struct aspath * aspath)422 aspath_origin(struct aspath *aspath)
423 {
424 return (aspath->source_as);
425 }
426
427 /* rde_community.c */
428 int community_match(struct rde_community *, struct community *,
429 struct rde_peer *);
430 int community_count(struct rde_community *, uint8_t type);
431 int community_set(struct rde_community *, struct community *,
432 struct rde_peer *);
433 void community_delete(struct rde_community *, struct community *,
434 struct rde_peer *);
435
436 int community_add(struct rde_community *, int, struct ibuf *);
437 int community_large_add(struct rde_community *, int, struct ibuf *);
438 int community_ext_add(struct rde_community *, int, int, struct ibuf *);
439 int community_writebuf(struct rde_community *, uint8_t, int, struct ibuf *);
440
441 void communities_shutdown(void);
442 struct rde_community *communities_lookup(struct rde_community *);
443 struct rde_community *communities_link(struct rde_community *);
444 void communities_unlink(struct rde_community *);
445
446 int communities_equal(struct rde_community *, struct rde_community *);
447 void communities_copy(struct rde_community *, struct rde_community *);
448 void communities_clean(struct rde_community *);
449
450 static inline struct rde_community *
communities_ref(struct rde_community * comm)451 communities_ref(struct rde_community *comm)
452 {
453 if (comm->refcnt == 0)
454 fatalx("%s: not-referenced community", __func__);
455 comm->refcnt++;
456 rdemem.comm_refs++;
457 return comm;
458 }
459
460 static inline void
communities_unref(struct rde_community * comm)461 communities_unref(struct rde_community *comm)
462 {
463 if (comm == NULL)
464 return;
465 rdemem.comm_refs--;
466 if (--comm->refcnt == 1) /* last ref is hold internally */
467 communities_unlink(comm);
468 }
469
470 int community_to_rd(struct community *, uint64_t *);
471
472 /* rde_decide.c */
473 int prefix_eligible(struct prefix *);
474 struct prefix *prefix_best(struct rib_entry *);
475 void prefix_evaluate(struct rib_entry *, struct prefix *,
476 struct prefix *);
477 void prefix_evaluate_nexthop(struct prefix *, enum nexthop_state,
478 enum nexthop_state);
479
480 /* rde_filter.c */
481 void rde_apply_set(struct filter_set_head *, struct rde_peer *,
482 struct rde_peer *, struct filterstate *, u_int8_t);
483 void rde_filterstate_init(struct filterstate *);
484 void rde_filterstate_prep(struct filterstate *, struct prefix *);
485 void rde_filterstate_copy(struct filterstate *, struct filterstate *);
486 void rde_filterstate_set_vstate(struct filterstate *, uint8_t, uint8_t);
487 void rde_filterstate_clean(struct filterstate *);
488 int rde_filter_skip_rule(struct rde_peer *, struct filter_rule *);
489 int rde_filter_equal(struct filter_head *, struct filter_head *);
490 void rde_filter_calc_skip_steps(struct filter_head *);
491 enum filter_actions rde_filter(struct filter_head *, struct rde_peer *,
492 struct rde_peer *, struct bgpd_addr *, uint8_t,
493 struct filterstate *);
494
495 /* rde_prefix.c */
496 void pt_init(void);
497 void pt_shutdown(void);
498 void pt_getaddr(struct pt_entry *, struct bgpd_addr *);
499 int pt_getflowspec(struct pt_entry *, uint8_t **);
500 struct pt_entry *pt_fill(struct bgpd_addr *, int);
501 struct pt_entry *pt_get(struct bgpd_addr *, int);
502 struct pt_entry *pt_add(struct bgpd_addr *, int);
503 struct pt_entry *pt_get_flow(struct flowspec *);
504 struct pt_entry *pt_add_flow(struct flowspec *);
505 void pt_remove(struct pt_entry *);
506 struct pt_entry *pt_lookup(struct bgpd_addr *);
507 int pt_prefix_cmp(const struct pt_entry *, const struct pt_entry *);
508 int pt_writebuf(struct ibuf *, struct pt_entry *, int, int, uint32_t);
509
510 static inline struct pt_entry *
pt_ref(struct pt_entry * pt)511 pt_ref(struct pt_entry *pt)
512 {
513 ++pt->refcnt;
514 if (pt->refcnt == 0)
515 fatalx("pt_ref: overflow");
516 return pt;
517 }
518
519 static inline void
pt_unref(struct pt_entry * pt)520 pt_unref(struct pt_entry *pt)
521 {
522 if (pt->refcnt == 0)
523 fatalx("pt_unref: underflow");
524 if (--pt->refcnt == 0)
525 pt_remove(pt);
526 }
527
528 /* rde_rib.c */
529 extern uint16_t rib_size;
530
531 struct rib *rib_new(char *, u_int, uint16_t);
532 int rib_update(struct rib *);
533 struct rib *rib_byid(uint16_t);
534 uint16_t rib_find(char *);
535 void rib_free(struct rib *);
536 void rib_shutdown(void);
537 struct rib_entry *rib_get(struct rib *, struct pt_entry *);
538 struct rib_entry *rib_get_addr(struct rib *, struct bgpd_addr *, int);
539 struct rib_entry *rib_match(struct rib *, struct bgpd_addr *);
540 int rib_dump_pending(void);
541 void rib_dump_runner(void);
542 int rib_dump_new(uint16_t, uint8_t, unsigned int, void *,
543 void (*)(struct rib_entry *, void *),
544 void (*)(void *, uint8_t),
545 int (*)(void *));
546 int rib_dump_subtree(uint16_t, struct bgpd_addr *, uint8_t,
547 unsigned int count, void *arg,
548 void (*)(struct rib_entry *, void *),
549 void (*)(void *, uint8_t),
550 int (*)(void *));
551 void rib_dump_terminate(void *);
552
553 extern struct rib flowrib;
554
555 static inline struct rib *
re_rib(struct rib_entry * re)556 re_rib(struct rib_entry *re)
557 {
558 if (re->prefix->aid == AID_FLOWSPECv4 ||
559 re->prefix->aid == AID_FLOWSPECv6)
560 return &flowrib;
561 return rib_byid(re->rib_id);
562 }
563
564 void path_shutdown(void);
565 struct rde_aspath *path_copy(struct rde_aspath *, const struct rde_aspath *);
566 struct rde_aspath *path_prep(struct rde_aspath *);
567 struct rde_aspath *path_get(void);
568 void path_clean(struct rde_aspath *);
569 void path_put(struct rde_aspath *);
570
571 #define PREFIX_SIZE(x) (((x) + 7) / 8 + 1)
572 struct prefix *prefix_get(struct rib *, struct rde_peer *, uint32_t,
573 struct bgpd_addr *, int);
574 struct prefix *prefix_adjout_get(struct rde_peer *, uint32_t,
575 struct pt_entry *);
576 struct prefix *prefix_adjout_first(struct rde_peer *, struct pt_entry *);
577 struct prefix *prefix_adjout_next(struct rde_peer *, struct prefix *);
578 struct prefix *prefix_adjout_lookup(struct rde_peer *, struct bgpd_addr *,
579 int);
580 struct prefix *prefix_adjout_match(struct rde_peer *, struct bgpd_addr *);
581 int prefix_update(struct rib *, struct rde_peer *, uint32_t,
582 uint32_t, struct filterstate *, struct bgpd_addr *, int);
583 int prefix_withdraw(struct rib *, struct rde_peer *, uint32_t,
584 struct bgpd_addr *, int);
585 int prefix_flowspec_update(struct rde_peer *, struct filterstate *,
586 struct pt_entry *, uint32_t);
587 int prefix_flowspec_withdraw(struct rde_peer *, struct pt_entry *);
588 void prefix_flowspec_dump(uint8_t, void *,
589 void (*)(struct rib_entry *, void *),
590 void (*)(void *, uint8_t));
591 void prefix_add_eor(struct rde_peer *, uint8_t);
592 void prefix_adjout_update(struct prefix *, struct rde_peer *,
593 struct filterstate *, struct pt_entry *, uint32_t);
594 void prefix_adjout_withdraw(struct prefix *);
595 void prefix_adjout_destroy(struct prefix *);
596 int prefix_dump_new(struct rde_peer *, uint8_t, unsigned int,
597 void *, void (*)(struct prefix *, void *),
598 void (*)(void *, uint8_t), int (*)(void *));
599 int prefix_dump_subtree(struct rde_peer *, struct bgpd_addr *,
600 uint8_t, unsigned int, void *,
601 void (*)(struct prefix *, void *),
602 void (*)(void *, uint8_t), int (*)(void *));
603 struct prefix *prefix_bypeer(struct rib_entry *, struct rde_peer *,
604 uint32_t);
605 void prefix_destroy(struct prefix *);
606
RB_PROTOTYPE(prefix_tree, prefix, entry, prefix_cmp)
608
609 static inline struct rde_peer *
610 prefix_peer(struct prefix *p)
611 {
612 return (p->peer);
613 }
614
615 static inline struct rde_aspath *
prefix_aspath(struct prefix * p)616 prefix_aspath(struct prefix *p)
617 {
618 return (p->aspath);
619 }
620
621 static inline struct rde_community *
prefix_communities(struct prefix * p)622 prefix_communities(struct prefix *p)
623 {
624 return (p->communities);
625 }
626
627 static inline struct nexthop *
prefix_nexthop(struct prefix * p)628 prefix_nexthop(struct prefix *p)
629 {
630 return (p->nexthop);
631 }
632
633 static inline uint8_t
prefix_nhflags(struct prefix * p)634 prefix_nhflags(struct prefix *p)
635 {
636 return (p->nhflags & NEXTHOP_MASK);
637 }
638
639 static inline int
prefix_nhvalid(struct prefix * p)640 prefix_nhvalid(struct prefix *p)
641 {
642 return ((p->nhflags & NEXTHOP_VALID) != 0);
643 }
644
645 static inline uint8_t
prefix_roa_vstate(struct prefix * p)646 prefix_roa_vstate(struct prefix *p)
647 {
648 return (p->validation_state & ROA_MASK);
649 }
650
651 static inline uint8_t
prefix_aspa_vstate(struct prefix * p)652 prefix_aspa_vstate(struct prefix *p)
653 {
654 return (p->validation_state >> 4);
655 }
656
657 static inline void
prefix_set_vstate(struct prefix * p,uint8_t roa_vstate,uint8_t aspa_vstate)658 prefix_set_vstate(struct prefix *p, uint8_t roa_vstate, uint8_t aspa_vstate)
659 {
660 p->validation_state = roa_vstate & ROA_MASK;
661 p->validation_state |= aspa_vstate << 4;
662 }
663
664 static inline struct rib_entry *
prefix_re(struct prefix * p)665 prefix_re(struct prefix *p)
666 {
667 if (p->flags & PREFIX_FLAG_ADJOUT)
668 return NULL;
669 return (p->entry.list.re);
670 }
671
672 void nexthop_shutdown(void);
673 int nexthop_pending(void);
674 void nexthop_runner(void);
675 void nexthop_modify(struct nexthop *, enum action_types, uint8_t,
676 struct nexthop **, uint8_t *);
677 void nexthop_link(struct prefix *);
678 void nexthop_unlink(struct prefix *);
679 void nexthop_update(struct kroute_nexthop *);
680 struct nexthop *nexthop_get(struct bgpd_addr *);
681 struct nexthop *nexthop_ref(struct nexthop *);
682 int nexthop_unref(struct nexthop *);
683
684 /* rde_update.c */
685 void up_generate_updates(struct rde_peer *, struct rib_entry *);
686 void up_generate_addpath(struct rde_peer *, struct rib_entry *);
687 void up_generate_addpath_all(struct rde_peer *, struct rib_entry *,
688 struct prefix *, struct prefix *);
689 void up_generate_default(struct rde_peer *, uint8_t);
690 int up_is_eor(struct rde_peer *, uint8_t);
691 int up_dump_withdraws(struct ibuf *, struct rde_peer *, uint8_t);
692 int up_dump_update(struct ibuf *, struct rde_peer *, uint8_t);
693
694 /* rde_aspa.c */
695 void aspa_validation(struct rde_aspa *, struct aspath *,
696 struct rde_aspa_state *);
697 struct rde_aspa *aspa_table_prep(uint32_t, size_t);
698 void aspa_add_set(struct rde_aspa *, uint32_t, const uint32_t *,
699 uint32_t);
700 void aspa_table_free(struct rde_aspa *);
701 void aspa_table_stats(const struct rde_aspa *,
702 struct ctl_show_set *);
703 int aspa_table_equal(const struct rde_aspa *,
704 const struct rde_aspa *);
705 void aspa_table_unchanged(struct rde_aspa *,
706 const struct rde_aspa *);
707
708 #endif /* __RDE_H__ */
709