1 /*
2  * Addpath TX ID selection, and related utilities
3  * Copyright (C) 2018  Amazon.com, Inc. or its affiliates
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License as published by the Free
7  * Software Foundation; either version 2 of the License, or (at your option)
8  * any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; see the file COPYING; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18  */
19 
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23 
24 #include "bgp_addpath.h"
25 #include "bgp_route.h"
26 
/*
 * Display/config strings for each addpath strategy. Indexed directly by
 * enum bgp_addpath_strat (see bgp_addpath_names()), so entry order must
 * match the enum's order.
 */
static const struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		/* Advertise every path */
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		/* Advertise the best path from each neighboring AS */
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};
43 
/*
 * Placeholder strings returned by bgp_addpath_names() for any strategy
 * value outside the strat_names table (i.e. >= BGP_ADDPATH_MAX).
 */
static const struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};
51 
52 /*
53  * Returns a structure full of strings associated with an addpath type. Will
54  * never return null.
55  */
56 const struct bgp_addpath_strategy_names *
bgp_addpath_names(enum bgp_addpath_strat strat)57 bgp_addpath_names(enum bgp_addpath_strat strat)
58 {
59 	if (strat < BGP_ADDPATH_MAX)
60 		return &(strat_names[strat]);
61 	else
62 		return &unknown_names;
63 };
64 
65 /*
66  * Returns if any peer is transmitting addpaths for a given afi/safi.
67  */
bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data * d,afi_t afi,safi_t safi)68 bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
69 				 safi_t safi)
70 {
71 	return d->total_peercount[afi][safi] > 0;
72 }
73 
74 /*
75  * Initialize the BGP instance level data for addpath.
76  */
bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data * d)77 void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
78 {
79 	safi_t safi;
80 	afi_t afi;
81 	int i;
82 
83 	for (afi = AFI_IP; afi < AFI_MAX; afi++) {
84 		for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
85 			for (i = 0; i < BGP_ADDPATH_MAX; i++) {
86 				d->id_allocators[afi][safi][i] = NULL;
87 				d->peercount[afi][safi][i] = 0;
88 			}
89 			d->total_peercount[afi][safi] = 0;
90 		}
91 	}
92 }
93 
94 /*
95  * Free up resources associated with BGP route info structures.
96  */
bgp_addpath_free_info_data(struct bgp_addpath_info_data * d,struct bgp_addpath_node_data * nd)97 void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
98 			      struct bgp_addpath_node_data *nd)
99 {
100 	int i;
101 
102 	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
103 		if (d->addpath_tx_id[i] != IDALLOC_INVALID)
104 			idalloc_free_to_pool(&nd->free_ids[i],
105 					     d->addpath_tx_id[i]);
106 	}
107 }
108 
109 /*
110  * Return the addpath ID used to send a particular route, to a particular peer,
111  * in a particular AFI/SAFI.
112  */
bgp_addpath_id_for_peer(struct peer * peer,afi_t afi,safi_t safi,struct bgp_addpath_info_data * d)113 uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
114 				struct bgp_addpath_info_data *d)
115 {
116 	if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
117 		return d->addpath_tx_id[peer->addpath_type[afi][safi]];
118 	else
119 		return IDALLOC_INVALID;
120 }
121 
122 /*
123  * Returns true if the path has an assigned addpath ID for any of the addpath
124  * strategies.
125  */
bgp_addpath_info_has_ids(struct bgp_addpath_info_data * d)126 bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
127 {
128 	int i;
129 
130 	for (i = 0; i < BGP_ADDPATH_MAX; i++)
131 		if (d->addpath_tx_id[i] != 0)
132 			return true;
133 
134 	return false;
135 }
136 
137 /*
138  * Releases any ID's associated with the BGP prefix.
139  */
bgp_addpath_free_node_data(struct bgp_addpath_bgp_data * bd,struct bgp_addpath_node_data * nd,afi_t afi,safi_t safi)140 void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
141 			      struct bgp_addpath_node_data *nd, afi_t afi,
142 			      safi_t safi)
143 {
144 	int i;
145 
146 	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
147 		idalloc_drain_pool(bd->id_allocators[afi][safi][i],
148 				   &(nd->free_ids[i]));
149 	}
150 }
151 
152 /*
153  * Check to see if the addpath strategy requires DMED to be configured to work.
154  */
bgp_addpath_dmed_required(int strategy)155 bool bgp_addpath_dmed_required(int strategy)
156 {
157 	return strategy == BGP_ADDPATH_BEST_PER_AS;
158 }
159 
160 /*
161  * Return true if this is a path we should advertise due to a
162  * configured addpath-tx knob
163  */
bgp_addpath_tx_path(enum bgp_addpath_strat strat,struct bgp_path_info * pi)164 bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, struct bgp_path_info *pi)
165 {
166 	switch (strat) {
167 	case BGP_ADDPATH_NONE:
168 		return false;
169 	case BGP_ADDPATH_ALL:
170 		return true;
171 	case BGP_ADDPATH_BEST_PER_AS:
172 		if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
173 			return true;
174 		else
175 			return false;
176 	default:
177 		return false;
178 	}
179 }
180 
bgp_addpath_flush_type_rn(struct bgp * bgp,afi_t afi,safi_t safi,enum bgp_addpath_strat addpath_type,struct bgp_dest * dest)181 static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
182 				      enum bgp_addpath_strat addpath_type,
183 				      struct bgp_dest *dest)
184 {
185 	struct bgp_path_info *pi;
186 
187 	idalloc_drain_pool(
188 		bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
189 		&(dest->tx_addpath.free_ids[addpath_type]));
190 	for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
191 		if (pi->tx_addpath.addpath_tx_id[addpath_type]
192 		    != IDALLOC_INVALID) {
193 			idalloc_free(
194 				bgp->tx_addpath
195 					.id_allocators[afi][safi][addpath_type],
196 				pi->tx_addpath.addpath_tx_id[addpath_type]);
197 			pi->tx_addpath.addpath_tx_id[addpath_type] =
198 				IDALLOC_INVALID;
199 		}
200 	}
201 }
202 
203 /*
204  * Purge all addpath ID's on a BGP instance associated with the addpath
205  * strategy, and afi/safi combination. This lets us let go of all memory held to
206  * track ID numbers associated with an addpath type not in use. Since
207  * post-bestpath ID processing is skipped for types not used, this is the only
208  * chance to free this data.
209  */
bgp_addpath_flush_type(struct bgp * bgp,afi_t afi,safi_t safi,enum bgp_addpath_strat addpath_type)210 static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
211 				   enum bgp_addpath_strat addpath_type)
212 {
213 	struct bgp_dest *dest, *ndest;
214 
215 	for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
216 	     dest = bgp_route_next(dest)) {
217 		if (safi == SAFI_MPLS_VPN) {
218 			struct bgp_table *table;
219 
220 			table = bgp_dest_get_bgp_table_info(dest);
221 			if (!table)
222 				continue;
223 
224 			for (ndest = bgp_table_top(table); ndest;
225 			     ndest = bgp_route_next(ndest))
226 				bgp_addpath_flush_type_rn(bgp, afi, safi,
227 							  addpath_type, ndest);
228 		} else {
229 			bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type,
230 						  dest);
231 		}
232 	}
233 
234 	idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
235 	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
236 }
237 
238 /*
239  * Allocate an Addpath ID for the given type on a path, if necessary.
240  */
bgp_addpath_populate_path(struct id_alloc * allocator,struct bgp_path_info * path,enum bgp_addpath_strat addpath_type)241 static void bgp_addpath_populate_path(struct id_alloc *allocator,
242 				      struct bgp_path_info *path,
243 				      enum bgp_addpath_strat addpath_type)
244 {
245 	if (bgp_addpath_tx_path(addpath_type, path)) {
246 		path->tx_addpath.addpath_tx_id[addpath_type] =
247 			idalloc_allocate(allocator);
248 	}
249 }
250 
251 /*
252  * Compute addpath ID's on a BGP instance associated with the addpath strategy,
253  * and afi/safi combination. Since we won't waste the time computing addpath IDs
254  * for unused strategies, the first time a peer is configured to use a strategy,
255  * we have to backfill the data.
256  */
bgp_addpath_populate_type(struct bgp * bgp,afi_t afi,safi_t safi,enum bgp_addpath_strat addpath_type)257 static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
258 				    enum bgp_addpath_strat addpath_type)
259 {
260 	struct bgp_dest *dest, *ndest;
261 	char buf[200];
262 	struct id_alloc *allocator;
263 
264 	snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
265 		 bgp_addpath_names(addpath_type)->config_name, (int)afi,
266 		 (int)safi);
267 	buf[sizeof(buf) - 1] = '\0';
268 	zlog_info("Computing addpath IDs for addpath type %s",
269 		bgp_addpath_names(addpath_type)->human_name);
270 
271 	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
272 		idalloc_new(buf);
273 
274 	idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
275 		BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
276 
277 	allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];
278 
279 	for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
280 	     dest = bgp_route_next(dest)) {
281 		struct bgp_path_info *bi;
282 
283 		if (safi == SAFI_MPLS_VPN) {
284 			struct bgp_table *table;
285 
286 			table = bgp_dest_get_bgp_table_info(dest);
287 			if (!table)
288 				continue;
289 
290 			for (ndest = bgp_table_top(table); ndest;
291 			     ndest = bgp_route_next(ndest))
292 				for (bi = bgp_dest_get_bgp_path_info(ndest); bi;
293 				     bi = bi->next)
294 					bgp_addpath_populate_path(allocator, bi,
295 								  addpath_type);
296 		} else {
297 			for (bi = bgp_dest_get_bgp_path_info(dest); bi;
298 			     bi = bi->next)
299 				bgp_addpath_populate_path(allocator, bi,
300 							  addpath_type);
301 		}
302 	}
303 }
304 
305 /*
306  * Handle updates to a peer or group's addpath strategy. If after adjusting
307  * counts a addpath strategy is in use for the first time, or no longer in use,
308  * the IDs for that strategy will be populated or flushed.
309  */
bgp_addpath_type_changed(struct bgp * bgp)310 void bgp_addpath_type_changed(struct bgp *bgp)
311 {
312 	afi_t afi;
313 	safi_t safi;
314 	struct listnode *node, *nnode;
315 	struct peer *peer;
316 	int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
317 	enum bgp_addpath_strat type;
318 
319 	FOREACH_AFI_SAFI(afi, safi) {
320 		for (type=0; type<BGP_ADDPATH_MAX; type++) {
321 			peer_count[afi][safi][type] = 0;
322 		}
323 		bgp->tx_addpath.total_peercount[afi][safi] = 0;
324 	}
325 
326 	for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
327 		FOREACH_AFI_SAFI(afi, safi) {
328 			type = peer->addpath_type[afi][safi];
329 			if (type != BGP_ADDPATH_NONE) {
330 				peer_count[afi][safi][type] += 1;
331 				bgp->tx_addpath.total_peercount[afi][safi] += 1;
332 			}
333 		}
334 	}
335 
336 	FOREACH_AFI_SAFI(afi, safi) {
337 		for (type=0; type<BGP_ADDPATH_MAX; type++) {
338 			int old = bgp->tx_addpath.peercount[afi][safi][type];
339 			int new = peer_count[afi][safi][type];
340 
341 			bgp->tx_addpath.peercount[afi][safi][type] = new;
342 
343 			if (old == 0 && new != 0) {
344 				bgp_addpath_populate_type(bgp, afi, safi,
345 					type);
346 			} else if (old != 0 && new == 0) {
347 				bgp_addpath_flush_type(bgp, afi, safi, type);
348 			}
349 		}
350 	}
351 }
352 
353 /*
354  * Change the addpath type assigned to a peer, or peer group. In addition to
355  * adjusting the counts, peer sessions will be reset as needed to make the
356  * change take effect.
357  */
bgp_addpath_set_peer_type(struct peer * peer,afi_t afi,safi_t safi,enum bgp_addpath_strat addpath_type)358 void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
359 			      enum bgp_addpath_strat addpath_type)
360 {
361 	struct bgp *bgp = peer->bgp;
362 	enum bgp_addpath_strat old_type = peer->addpath_type[afi][safi];
363 	struct listnode *node, *nnode;
364 	struct peer *tmp_peer;
365 	struct peer_group *group;
366 
367 	if (addpath_type == old_type)
368 		return;
369 
370 	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
371 	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
372 		/* A "no" config on a group member inherits group */
373 		addpath_type = peer->group->conf->addpath_type[afi][safi];
374 	}
375 
376 	peer->addpath_type[afi][safi] = addpath_type;
377 
378 	bgp_addpath_type_changed(bgp);
379 
380 	if (addpath_type != BGP_ADDPATH_NONE) {
381 		if (bgp_addpath_dmed_required(addpath_type)) {
382 			if (!CHECK_FLAG(bgp->flags,
383 					BGP_FLAG_DETERMINISTIC_MED)) {
384 				zlog_warn(
385 					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
386 					peer->host);
387 				SET_FLAG(bgp->flags,
388 					 BGP_FLAG_DETERMINISTIC_MED);
389 				bgp_recalculate_all_bestpaths(bgp);
390 			}
391 		}
392 	}
393 
394 	zlog_info("Resetting peer %s%s due to change in addpath config",
395 		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
396 		  peer->host);
397 
398 	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
399 		group = peer->group;
400 
401 		/* group will be null as peer_group_delete calls peer_delete on
402 		 * group->conf. That peer_delete will eventuallly end up here
403 		 * if the group was configured to tx addpaths.
404 		 */
405 		if (group != NULL) {
406 			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
407 			     tmp_peer)) {
408 				if (tmp_peer->addpath_type[afi][safi] ==
409 				    old_type) {
410 					bgp_addpath_set_peer_type(tmp_peer,
411 								 afi,
412 								 safi,
413 								 addpath_type);
414 				}
415 			}
416 		}
417 	} else {
418 		peer_change_action(peer, afi, safi, peer_change_reset);
419 	}
420 
421 }
422 
423 /*
424  * Intended to run after bestpath. This function will take TX IDs from paths
425  * that no longer need them, and give them to paths that do. This prevents
426  * best-per-as updates from needing to do a separate withdraw and update just to
427  * swap out which path is sent.
428  */
bgp_addpath_update_ids(struct bgp * bgp,struct bgp_dest * bn,afi_t afi,safi_t safi)429 void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *bn, afi_t afi,
430 			    safi_t safi)
431 {
432 	int i;
433 	struct bgp_path_info *pi;
434 	struct id_alloc_pool **pool_ptr;
435 
436 	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
437 		struct id_alloc *alloc =
438 			bgp->tx_addpath.id_allocators[afi][safi][i];
439 		pool_ptr = &(bn->tx_addpath.free_ids[i]);
440 
441 		if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
442 			continue;
443 
444 		/* Free Unused IDs back to the pool.*/
445 		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
446 			if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
447 			    && !bgp_addpath_tx_path(i, pi)) {
448 				idalloc_free_to_pool(pool_ptr,
449 					pi->tx_addpath.addpath_tx_id[i]);
450 				pi->tx_addpath.addpath_tx_id[i] =
451 					IDALLOC_INVALID;
452 			}
453 		}
454 
455 		/* Give IDs to paths that need them (pulling from the pool) */
456 		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
457 			if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
458 			    && bgp_addpath_tx_path(i, pi)) {
459 				pi->tx_addpath.addpath_tx_id[i] =
460 					idalloc_allocate_prefer_pool(
461 						alloc, pool_ptr);
462 			}
463 		}
464 
465 		/* Free any IDs left in the pool to the main allocator */
466 		idalloc_drain_pool(alloc, pool_ptr);
467 	}
468 }
469