/*	$OpenBSD: rde.h,v 1.306 2024/09/25 14:46:51 claudio Exp $ */

/*
 * Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org> and
 * Andre Oppermann <oppermann@networx.ch>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __RDE_H__
#define __RDE_H__

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/tree.h>
#include <stdint.h>
#include <stddef.h>

#include "bgpd.h"
#include "log.h"

/* rde internal structures */

enum peer_state {
	PEER_NONE,
	PEER_DOWN,
	PEER_UP,
	PEER_ERR	/* error occurred going to PEER_DOWN state */
};

LIST_HEAD(prefix_list, prefix);
TAILQ_HEAD(prefix_queue, prefix);
RB_HEAD(rib_tree, rib_entry);

struct rib_entry {
	RB_ENTRY(rib_entry) rib_e;
	struct prefix_queue prefix_h;
	struct pt_entry *prefix;
	uint16_t rib_id;
	uint16_t lock;
};

struct rib {
	struct rib_tree tree;
	char name[PEER_DESCR_LEN];
	struct filter_head *in_rules;
	struct filter_head *in_rules_tmp;
	u_int rtableid;
	u_int rtableid_tmp;
	enum reconf_action state, fibstate;
	uint16_t id;
	uint16_t flags;
	uint16_t flags_tmp;
};

#define RIB_ADJ_IN 0
#define RIB_LOC_START 1
#define RIB_NOTFOUND 0xffff
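
/*
 * Example sketch (illustration only, not part of the RDE API): rib_find()
 * is assumed to return RIB_NOTFOUND when no RIB of that name exists;
 * RIB_ADJ_IN (id 0) names the Adj-RIB-In and the local RIBs start at
 * RIB_LOC_START.
 */
#if 0
static struct rib *
example_lookup_rib(char *name)
{
	uint16_t id;

	id = rib_find(name);
	if (id == RIB_NOTFOUND)
		return NULL;
	return rib_byid(id);
}
#endif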

/*
 * How do we identify peers between the session handler and the rde?
 * Currently I assume that we can do that with the neighbor_ip...
 */
RB_HEAD(peer_tree, rde_peer);
RB_HEAD(prefix_tree, prefix);
RB_HEAD(prefix_index, prefix);
struct iq;

struct rde_peer {
	RB_ENTRY(rde_peer) entry;
	SIMPLEQ_HEAD(, iq) imsg_queue;
	struct peer_config conf;
	struct rde_peer_stats stats;
	struct bgpd_addr remote_addr;
	struct bgpd_addr local_v4_addr;
	struct bgpd_addr local_v6_addr;
	struct capabilities capa;
	struct addpath_eval eval;
	struct prefix_index adj_rib_out;
	struct prefix_tree updates[AID_MAX];
	struct prefix_tree withdraws[AID_MAX];
	struct filter_head *out_rules;
	time_t staletime[AID_MAX];
	uint32_t remote_bgpid;
	uint32_t path_id_tx;
	unsigned int local_if_scope;
	enum peer_state state;
	enum export_type export_type;
	enum role role;
	uint16_t loc_rib_id;
	uint16_t short_as;
	uint16_t mrt_idx;
	uint8_t recv_eor;	/* bitfield per AID */
	uint8_t sent_eor;	/* bitfield per AID */
	uint8_t reconf_out;	/* out filter changed */
	uint8_t reconf_rib;	/* rib changed */
	uint8_t throttled;
	uint8_t flags;
};

struct rde_aspa;
struct rde_aspa_state {
	uint8_t onlyup;
	uint8_t downup;
};

#define AS_SET 1
#define AS_SEQUENCE 2
#define AS_CONFED_SEQUENCE 3
#define AS_CONFED_SET 4
#define ASPATH_HEADER_SIZE (offsetof(struct aspath, data))

struct aspath {
	uint32_t source_as;	/* cached source_as */
	uint16_t len;		/* total length of aspath in octets */
	uint16_t ascnt;		/* number of AS hops in data */
	u_char data[1];		/* placeholder for actual data */
};
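
/*
 * Example sketch (illustration only, not the actual aspath_get() code):
 * struct aspath is variable sized; data[1] is just a placeholder, so an
 * object is allocated as the fixed header (ASPATH_HEADER_SIZE) followed by
 * the wire-format AS_PATH octets.
 */
#if 0
static struct aspath *
example_aspath_alloc(const void *wire, uint16_t len)
{
	struct aspath *a;

	if ((a = calloc(1, ASPATH_HEADER_SIZE + len)) == NULL)
		return NULL;
	a->len = len;			/* total length in octets */
	memcpy(a->data, wire, len);	/* ascnt/source_as filled elsewhere */
	return a;
}
#endif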

enum attrtypes {
	ATTR_UNDEF,
	ATTR_ORIGIN,
	ATTR_ASPATH,
	ATTR_NEXTHOP,
	ATTR_MED,
	ATTR_LOCALPREF,
	ATTR_ATOMIC_AGGREGATE,
	ATTR_AGGREGATOR,
	ATTR_COMMUNITIES,
	ATTR_ORIGINATOR_ID,
	ATTR_CLUSTER_LIST,
	ATTR_MP_REACH_NLRI=14,
	ATTR_MP_UNREACH_NLRI=15,
	ATTR_EXT_COMMUNITIES=16,
	ATTR_AS4_PATH=17,
	ATTR_AS4_AGGREGATOR=18,
	ATTR_LARGE_COMMUNITIES=32,
	ATTR_OTC=35,
	ATTR_FIRST_UNKNOWN,	/* after this all attributes are unknown */
};

/* attribute flags. 4 low order bits reserved */
#define ATTR_EXTLEN 0x10
#define ATTR_PARTIAL 0x20
#define ATTR_TRANSITIVE 0x40
#define ATTR_OPTIONAL 0x80
#define ATTR_RESERVED 0x0f
/* by default mask the reserved bits and the ext len bit */
#define ATTR_DEFMASK (ATTR_RESERVED | ATTR_EXTLEN)

/* default attribute flags for well known attributes */
#define ATTR_WELL_KNOWN ATTR_TRANSITIVE
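
/*
 * Example sketch (illustration only): ATTR_DEFMASK covers the bits that do
 * not change an attribute's meaning (the reserved low bits and the
 * extended-length bit), so they are masked out before attribute flags are
 * compared or stored.
 */
#if 0
static int
example_attr_flags_equal(uint8_t a, uint8_t b)
{
	return (a & ~ATTR_DEFMASK) == (b & ~ATTR_DEFMASK);
}
#endif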

struct attr {
	RB_ENTRY(attr) entry;
	u_char *data;
	int refcnt;
	uint16_t len;
	uint8_t flags;
	uint8_t type;
};

struct rde_community {
	RB_ENTRY(rde_community) entry;
	int size;
	int nentries;
	int flags;
	int refcnt;
	struct community *communities;
};

#define PARTIAL_COMMUNITIES 0x01
#define PARTIAL_LARGE_COMMUNITIES 0x02
#define PARTIAL_EXT_COMMUNITIES 0x04

#define F_ATTR_ORIGIN 0x00001
#define F_ATTR_ASPATH 0x00002
#define F_ATTR_NEXTHOP 0x00004
#define F_ATTR_LOCALPREF 0x00008
#define F_ATTR_MED 0x00010
#define F_ATTR_MED_ANNOUNCE 0x00020
#define F_ATTR_MP_REACH 0x00040
#define F_ATTR_MP_UNREACH 0x00080
#define F_ATTR_AS4BYTE_NEW 0x00100	/* AS4_PATH or AS4_AGGREGATOR */
#define F_ATTR_LOOP 0x00200		/* path would cause a route loop */
#define F_PREFIX_ANNOUNCED 0x00400
#define F_ANN_DYNAMIC 0x00800
#define F_ATTR_OTC 0x01000		/* OTC present */
#define F_ATTR_OTC_LEAK 0x02000		/* otc leak, not eligible */
#define F_ATTR_PARSE_ERR 0x10000	/* parse error, not eligible */
#define F_ATTR_LINKED 0x20000		/* if set path is on various lists */

#define ORIGIN_IGP 0
#define ORIGIN_EGP 1
#define ORIGIN_INCOMPLETE 2

#define DEFAULT_LPREF 100

struct rde_aspath {
	RB_ENTRY(rde_aspath) entry;
	struct attr **others;
	struct aspath *aspath;
	struct rde_aspa_state aspa_state;
	int refcnt;
	uint32_t flags;		/* internally used */
	uint32_t med;		/* multi exit disc */
	uint32_t lpref;		/* local pref */
	uint32_t weight;	/* low prio lpref */
	uint16_t rtlabelid;	/* route label id */
	uint16_t pftableid;	/* pf table id */
	uint8_t origin;
	uint8_t others_len;
	uint8_t aspa_generation;
};

enum nexthop_state {
	NEXTHOP_LOOKUP,
	NEXTHOP_UNREACH,
	NEXTHOP_REACH,
	NEXTHOP_FLAPPED		/* only used by oldstate */
};

struct nexthop {
	RB_ENTRY(nexthop) entry;
	TAILQ_ENTRY(nexthop) runner_l;
	struct prefix_list prefix_h;
	struct prefix *next_prefix;
	struct bgpd_addr exit_nexthop;
	struct bgpd_addr true_nexthop;
	struct bgpd_addr nexthop_net;
#if 0
	/*
	 * Currently we use the boolean nexthop state; this could be
	 * exchanged for a variable cost with a maximum value for
	 * unreachable.
	 */
	uint32_t costs;
#endif
	int refcnt;
	enum nexthop_state state;
	enum nexthop_state oldstate;
	uint8_t nexthop_netlen;
	uint8_t flags;
#define NEXTHOP_CONNECTED 0x01
};

/* generic entry without address specific part */
struct pt_entry {
	RB_ENTRY(pt_entry) pt_e;
	uint8_t aid;
	uint8_t prefixlen;
	uint16_t len;
	uint32_t refcnt;
	uint8_t data[4];	/* data depending on aid */
};

struct prefix {
	union {
		struct {
			TAILQ_ENTRY(prefix) rib;
			LIST_ENTRY(prefix) nexthop;
			struct rib_entry *re;
		} list;
		struct {
			RB_ENTRY(prefix) index, update;
		} tree;
	} entry;
	struct pt_entry *pt;
	struct rde_aspath *aspath;
	struct rde_community *communities;
	struct rde_peer *peer;
	struct nexthop *nexthop;	/* may be NULL */
	time_t lastchange;
	uint32_t path_id;
	uint32_t path_id_tx;
	uint16_t flags;
	uint8_t validation_state;
	uint8_t nhflags;
	int8_t dmetric;			/* decision metric */
};
#define PREFIX_FLAG_WITHDRAW 0x0001	/* enqueued on withdraw queue */
#define PREFIX_FLAG_UPDATE 0x0002	/* enqueued on update queue */
#define PREFIX_FLAG_DEAD 0x0004		/* locked but removed */
#define PREFIX_FLAG_STALE 0x0008	/* stale entry (graceful reload) */
#define PREFIX_FLAG_MASK 0x000f		/* mask for the prefix types */
#define PREFIX_FLAG_ADJOUT 0x0010	/* prefix is in the adj-out rib */
#define PREFIX_FLAG_EOR 0x0020		/* prefix is EoR */
#define PREFIX_NEXTHOP_LINKED 0x0040	/* prefix is linked onto nexthop list */
#define PREFIX_FLAG_LOCKED 0x0080	/* locked by rib walker */
#define PREFIX_FLAG_FILTERED 0x0100	/* prefix is filtered (ineligible) */

#define PREFIX_DMETRIC_NONE 0
#define PREFIX_DMETRIC_INVALID 1
#define PREFIX_DMETRIC_VALID 2
#define PREFIX_DMETRIC_AS_WIDE 3
#define PREFIX_DMETRIC_ECMP 4
#define PREFIX_DMETRIC_BEST 5
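
/*
 * Example sketch (illustration only, not the actual decision code): the
 * dmetric values form an ordered preference scale where larger is better;
 * PREFIX_DMETRIC_NONE is the unset default and only PREFIX_DMETRIC_BEST
 * marks the path selected as best.
 */
#if 0
static int
example_dmetric_prefer(const struct prefix *a, const struct prefix *b)
{
	return a->dmetric > b->dmetric;
}
#endif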

/* possible states for nhflags */
#define NEXTHOP_SELF 0x01
#define NEXTHOP_REJECT 0x02
#define NEXTHOP_BLACKHOLE 0x04
#define NEXTHOP_NOMODIFY 0x08
#define NEXTHOP_MASK 0x0f
#define NEXTHOP_VALID 0x80

struct filterstate {
	struct rde_aspath aspath;
	struct rde_community communities;
	struct nexthop *nexthop;
	uint8_t nhflags;
	uint8_t vstate;
};

enum eval_mode {
	EVAL_DEFAULT,
	EVAL_ALL,
	EVAL_RECONF,
};

extern struct rde_memstats rdemem;

/* prototypes */
/* mrt.c */
int mrt_dump_v2_hdr(struct mrt *, struct bgpd_config *);
void mrt_dump_upcall(struct rib_entry *, void *);

/* rde.c */
void rde_update_err(struct rde_peer *, uint8_t, uint8_t,
    struct ibuf *);
void rde_update_log(const char *, uint16_t,
    const struct rde_peer *, const struct bgpd_addr *,
    const struct bgpd_addr *, uint8_t);
void rde_send_kroute_flush(struct rib *);
void rde_send_kroute(struct rib *, struct prefix *, struct prefix *);
void rde_send_nexthop(struct bgpd_addr *, int);
void rde_pftable_add(uint16_t, struct prefix *);
void rde_pftable_del(uint16_t, struct prefix *);

int rde_evaluate_all(void);
uint32_t rde_local_as(void);
int rde_decisionflags(void);
void rde_peer_send_rrefresh(struct rde_peer *, uint8_t, uint8_t);
int rde_match_peer(struct rde_peer *, struct ctl_neighbor *);

/* rde_peer.c */
int peer_has_as4byte(struct rde_peer *);
int peer_has_add_path(struct rde_peer *, uint8_t, int);
int peer_accept_no_as_set(struct rde_peer *);
void peer_init(struct filter_head *);
void peer_shutdown(void);
void peer_foreach(void (*)(struct rde_peer *, void *), void *);
struct rde_peer *peer_get(uint32_t);
struct rde_peer *peer_match(struct ctl_neighbor *, uint32_t);
struct rde_peer *peer_add(uint32_t, struct peer_config *, struct filter_head *);
struct filter_head *peer_apply_out_filter(struct rde_peer *,
    struct filter_head *);

void rde_generate_updates(struct rib_entry *, struct prefix *,
    struct prefix *, enum eval_mode);

void peer_up(struct rde_peer *, struct session_up *);
void peer_down(struct rde_peer *, void *);
void peer_flush(struct rde_peer *, uint8_t, time_t);
void peer_stale(struct rde_peer *, uint8_t, int);
void peer_dump(struct rde_peer *, uint8_t);
void peer_begin_rrefresh(struct rde_peer *, uint8_t);

void peer_imsg_push(struct rde_peer *, struct imsg *);
int peer_imsg_pop(struct rde_peer *, struct imsg *);
int peer_imsg_pending(void);
void peer_imsg_flush(struct rde_peer *);

static inline int
peer_is_up(struct rde_peer *peer)
{
	return (peer->state == PEER_UP);
}

RB_PROTOTYPE(peer_tree, rde_peer, entry, peer_cmp);

/* rde_attr.c */
int attr_writebuf(struct ibuf *, uint8_t, uint8_t, void *,
    uint16_t);
void attr_shutdown(void);
int attr_optadd(struct rde_aspath *, uint8_t, uint8_t,
    void *, uint16_t);
struct attr *attr_optget(const struct rde_aspath *, uint8_t);
void attr_copy(struct rde_aspath *, const struct rde_aspath *);
int attr_compare(struct rde_aspath *, struct rde_aspath *);
void attr_freeall(struct rde_aspath *);
void attr_free(struct rde_aspath *, struct attr *);

struct aspath *aspath_get(void *, uint16_t);
struct aspath *aspath_copy(struct aspath *);
void aspath_put(struct aspath *);
u_char *aspath_deflate(u_char *, uint16_t *, int *);
void aspath_merge(struct rde_aspath *, struct attr *);
uint32_t aspath_neighbor(struct aspath *);
int aspath_loopfree(struct aspath *, uint32_t);
int aspath_compare(struct aspath *, struct aspath *);
int aspath_match(struct aspath *, struct filter_as *, uint32_t);
u_char *aspath_prepend(struct aspath *, uint32_t, int, uint16_t *);
u_char *aspath_override(struct aspath *, uint32_t, uint32_t,
    uint16_t *);
int aspath_lenmatch(struct aspath *, enum aslen_spec, u_int);

static inline u_char *
aspath_dump(struct aspath *aspath)
{
	return (aspath->data);
}

static inline uint16_t
aspath_length(struct aspath *aspath)
{
	return (aspath->len);
}

static inline uint32_t
aspath_origin(struct aspath *aspath)
{
	return (aspath->source_as);
}

/* rde_community.c */
int community_match(struct rde_community *, struct community *,
    struct rde_peer *);
int community_count(struct rde_community *, uint8_t type);
int community_set(struct rde_community *, struct community *,
    struct rde_peer *);
void community_delete(struct rde_community *, struct community *,
    struct rde_peer *);

int community_add(struct rde_community *, int, struct ibuf *);
int community_large_add(struct rde_community *, int, struct ibuf *);
int community_ext_add(struct rde_community *, int, int, struct ibuf *);
int community_writebuf(struct rde_community *, uint8_t, int, struct ibuf *);

void communities_shutdown(void);
struct rde_community *communities_lookup(struct rde_community *);
struct rde_community *communities_link(struct rde_community *);
void communities_unlink(struct rde_community *);

int communities_equal(struct rde_community *, struct rde_community *);
void communities_copy(struct rde_community *, struct rde_community *);
void communities_clean(struct rde_community *);

static inline struct rde_community *
communities_ref(struct rde_community *comm)
{
	if (comm->refcnt == 0)
		fatalx("%s: not-referenced community", __func__);
	comm->refcnt++;
	rdemem.comm_refs++;
	return comm;
}

static inline void
communities_unref(struct rde_community *comm)
{
	if (comm == NULL)
		return;
	rdemem.comm_refs--;
	if (--comm->refcnt == 1)	/* last ref is held internally */
		communities_unlink(comm);
}

int community_to_rd(struct community *, uint64_t *);

/* rde_decide.c */
int prefix_eligible(struct prefix *);
struct prefix *prefix_best(struct rib_entry *);
void prefix_evaluate(struct rib_entry *, struct prefix *,
    struct prefix *);
void prefix_evaluate_nexthop(struct prefix *, enum nexthop_state,
    enum nexthop_state);

/* rde_filter.c */
void rde_apply_set(struct filter_set_head *, struct rde_peer *,
    struct rde_peer *, struct filterstate *, uint8_t);
void rde_filterstate_init(struct filterstate *);
void rde_filterstate_prep(struct filterstate *, struct prefix *);
void rde_filterstate_copy(struct filterstate *, struct filterstate *);
void rde_filterstate_set_vstate(struct filterstate *, uint8_t, uint8_t);
void rde_filterstate_clean(struct filterstate *);
int rde_filter_skip_rule(struct rde_peer *, struct filter_rule *);
int rde_filter_equal(struct filter_head *, struct filter_head *);
void rde_filter_calc_skip_steps(struct filter_head *);
enum filter_actions rde_filter(struct filter_head *, struct rde_peer *,
    struct rde_peer *, struct bgpd_addr *, uint8_t,
    struct filterstate *);

/* rde_prefix.c */
void pt_init(void);
void pt_shutdown(void);
void pt_getaddr(struct pt_entry *, struct bgpd_addr *);
int pt_getflowspec(struct pt_entry *, uint8_t **);
struct pt_entry *pt_fill(struct bgpd_addr *, int);
struct pt_entry *pt_get(struct bgpd_addr *, int);
struct pt_entry *pt_add(struct bgpd_addr *, int);
struct pt_entry *pt_get_flow(struct flowspec *);
struct pt_entry *pt_add_flow(struct flowspec *);
void pt_remove(struct pt_entry *);
struct pt_entry *pt_lookup(struct bgpd_addr *);
int pt_prefix_cmp(const struct pt_entry *, const struct pt_entry *);
int pt_writebuf(struct ibuf *, struct pt_entry *, int, int, uint32_t);

static inline struct pt_entry *
pt_ref(struct pt_entry *pt)
{
	++pt->refcnt;
	if (pt->refcnt == 0)
		fatalx("pt_ref: overflow");
	return pt;
}

static inline void
pt_unref(struct pt_entry *pt)
{
	if (pt->refcnt == 0)
		fatalx("pt_unref: underflow");
	if (--pt->refcnt == 0)
		pt_remove(pt);
}

/* rde_rib.c */
extern uint16_t rib_size;

struct rib *rib_new(char *, u_int, uint16_t);
int rib_update(struct rib *);
struct rib *rib_byid(uint16_t);
uint16_t rib_find(char *);
void rib_free(struct rib *);
void rib_shutdown(void);
struct rib_entry *rib_get(struct rib *, struct pt_entry *);
struct rib_entry *rib_get_addr(struct rib *, struct bgpd_addr *, int);
struct rib_entry *rib_match(struct rib *, struct bgpd_addr *);
int rib_dump_pending(void);
void rib_dump_runner(void);
int rib_dump_new(uint16_t, uint8_t, unsigned int, void *,
    void (*)(struct rib_entry *, void *),
    void (*)(void *, uint8_t),
    int (*)(void *));
int rib_dump_subtree(uint16_t, struct bgpd_addr *, uint8_t,
    unsigned int count, void *arg,
    void (*)(struct rib_entry *, void *),
    void (*)(void *, uint8_t),
    int (*)(void *));
void rib_dump_terminate(void *);

extern struct rib flowrib;

static inline struct rib *
re_rib(struct rib_entry *re)
{
	if (re->prefix->aid == AID_FLOWSPECv4 ||
	    re->prefix->aid == AID_FLOWSPECv6)
		return &flowrib;
	return rib_byid(re->rib_id);
}

void path_shutdown(void);
struct rde_aspath *path_copy(struct rde_aspath *, const struct rde_aspath *);
struct rde_aspath *path_prep(struct rde_aspath *);
struct rde_aspath *path_get(void);
void path_clean(struct rde_aspath *);
void path_put(struct rde_aspath *);

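/*
 * Worked example (illustration only) for the PREFIX_SIZE() macro below:
 * it yields the encoded NLRI size of a prefix with x significant bits,
 * i.e. ceil(x / 8) address octets plus one length octet:
 *
 *	PREFIX_SIZE(0)  == 0 + 1 == 1	(default route, length octet only)
 *	PREFIX_SIZE(24) == 3 + 1 == 4
 *	PREFIX_SIZE(25) == 4 + 1 == 5
 *	PREFIX_SIZE(32) == 4 + 1 == 5
 */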
#define PREFIX_SIZE(x) (((x) + 7) / 8 + 1)
struct prefix *prefix_get(struct rib *, struct rde_peer *, uint32_t,
    struct bgpd_addr *, int);
struct prefix *prefix_adjout_get(struct rde_peer *, uint32_t,
    struct pt_entry *);
struct prefix *prefix_adjout_first(struct rde_peer *, struct pt_entry *);
struct prefix *prefix_adjout_next(struct rde_peer *, struct prefix *);
struct prefix *prefix_adjout_lookup(struct rde_peer *, struct bgpd_addr *,
    int);
struct prefix *prefix_adjout_match(struct rde_peer *, struct bgpd_addr *);
int prefix_update(struct rib *, struct rde_peer *, uint32_t,
    uint32_t, struct filterstate *, int, struct bgpd_addr *,
    int);
int prefix_withdraw(struct rib *, struct rde_peer *, uint32_t,
    struct bgpd_addr *, int);
int prefix_flowspec_update(struct rde_peer *, struct filterstate *,
    struct pt_entry *, uint32_t);
int prefix_flowspec_withdraw(struct rde_peer *, struct pt_entry *);
void prefix_flowspec_dump(uint8_t, void *,
    void (*)(struct rib_entry *, void *),
    void (*)(void *, uint8_t));
void prefix_add_eor(struct rde_peer *, uint8_t);
void prefix_adjout_update(struct prefix *, struct rde_peer *,
    struct filterstate *, struct pt_entry *, uint32_t);
void prefix_adjout_withdraw(struct prefix *);
void prefix_adjout_destroy(struct prefix *);
int prefix_dump_new(struct rde_peer *, uint8_t, unsigned int,
    void *, void (*)(struct prefix *, void *),
    void (*)(void *, uint8_t), int (*)(void *));
int prefix_dump_subtree(struct rde_peer *, struct bgpd_addr *,
    uint8_t, unsigned int, void *,
    void (*)(struct prefix *, void *),
    void (*)(void *, uint8_t), int (*)(void *));
struct prefix *prefix_bypeer(struct rib_entry *, struct rde_peer *,
    uint32_t);
void prefix_destroy(struct prefix *);

RB_PROTOTYPE(prefix_tree, prefix, entry, prefix_cmp)

static inline struct rde_peer *
prefix_peer(struct prefix *p)
{
	return (p->peer);
}

static inline struct rde_aspath *
prefix_aspath(struct prefix *p)
{
	return (p->aspath);
}

static inline struct rde_community *
prefix_communities(struct prefix *p)
{
	return (p->communities);
}

static inline struct nexthop *
prefix_nexthop(struct prefix *p)
{
	return (p->nexthop);
}

static inline uint8_t
prefix_nhflags(struct prefix *p)
{
	return (p->nhflags & NEXTHOP_MASK);
}

static inline int
prefix_nhvalid(struct prefix *p)
{
	return ((p->nhflags & NEXTHOP_VALID) != 0);
}

static inline uint8_t
prefix_roa_vstate(struct prefix *p)
{
	return (p->validation_state & ROA_MASK);
}

static inline uint8_t
prefix_aspa_vstate(struct prefix *p)
{
	return (p->validation_state >> 4);
}

static inline void
prefix_set_vstate(struct prefix *p, uint8_t roa_vstate, uint8_t aspa_vstate)
{
	p->validation_state = roa_vstate & ROA_MASK;
	p->validation_state |= aspa_vstate << 4;
}
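
/*
 * Example sketch (illustration only): the single validation_state octet
 * packs the ROA result in the low nibble and the ASPA result in the high
 * nibble, so both can be set together and read back independently.
 */
#if 0
static void
example_vstate_roundtrip(struct prefix *p, uint8_t roa, uint8_t aspa)
{
	prefix_set_vstate(p, roa, aspa);
	/* now prefix_roa_vstate(p) == (roa & ROA_MASK) */
	/* and prefix_aspa_vstate(p) == aspa, for values that fit in 4 bits */
}
#endif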

static inline struct rib_entry *
prefix_re(struct prefix *p)
{
	if (p->flags & PREFIX_FLAG_ADJOUT)
		return NULL;
	return (p->entry.list.re);
}

static inline int
prefix_filtered(struct prefix *p)
{
	return ((p->flags & PREFIX_FLAG_FILTERED) != 0);
}

void nexthop_shutdown(void);
int nexthop_pending(void);
void nexthop_runner(void);
void nexthop_modify(struct nexthop *, enum action_types, uint8_t,
    struct nexthop **, uint8_t *);
void nexthop_link(struct prefix *);
void nexthop_unlink(struct prefix *);
void nexthop_update(struct kroute_nexthop *);
struct nexthop *nexthop_get(struct bgpd_addr *);
struct nexthop *nexthop_ref(struct nexthop *);
int nexthop_unref(struct nexthop *);

/* rde_update.c */
void up_generate_updates(struct rde_peer *, struct rib_entry *);
void up_generate_addpath(struct rde_peer *, struct rib_entry *);
void up_generate_addpath_all(struct rde_peer *, struct rib_entry *,
    struct prefix *, struct prefix *);
void up_generate_default(struct rde_peer *, uint8_t);
int up_is_eor(struct rde_peer *, uint8_t);
struct ibuf *up_dump_withdraws(struct rde_peer *, uint8_t);
struct ibuf *up_dump_update(struct rde_peer *, uint8_t);

/* rde_aspa.c */
void aspa_validation(struct rde_aspa *, struct aspath *,
    struct rde_aspa_state *);
struct rde_aspa *aspa_table_prep(uint32_t, size_t);
void aspa_add_set(struct rde_aspa *, uint32_t, const uint32_t *,
    uint32_t);
void aspa_table_free(struct rde_aspa *);
void aspa_table_stats(const struct rde_aspa *,
    struct ctl_show_set *);
int aspa_table_equal(const struct rde_aspa *,
    const struct rde_aspa *);
void aspa_table_unchanged(struct rde_aspa *,
    const struct rde_aspa *);

#endif /* __RDE_H__ */