/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>

int errno = 0, rte_errno = 0;

#if 0
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#endif

#include "rte_shim.h"
#include "rte_lpm.h"

#if 0
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)
#endif

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)		: range = 1 - 32
 * mask   (OUT)		: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}
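
/*
 * Worked example (added for illustration): the arithmetic shift on the
 * negative constant sign-extends, so depth 1 yields 0x80000000, depth 8
 * yields 0xFF000000 and depth 24 yields 0xFFFFFF00, i.e. the contiguous
 * netmask for each prefix length.
 */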

/*
 * Converts given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
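
/*
 * Worked example (added for illustration): depth 16 yields a range of
 * 1 << (24 - 16) = 256 tbl24 entries, depth 24 yields exactly one tbl24
 * entry, and depth 32 yields a single tbl8 entry.
 */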

#if 0
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		l = te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
#endif

/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	//struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	//struct rte_lpm_list *lpm_list;

	//lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

#if 0
	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		lpm = NULL;
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}
#endif

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	//te->data = lpm;

	//TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_mcfg_tailq_write_unlock();

	return lpm;
}

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
#if 0
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();
#endif

	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	//rte_free(te);
}

#if 0
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
#endif

/*
 * Find, clean and allocate a tbl8.
 */
static int32_t
tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.next_hop = 0,
				.valid = INVALID,
				.depth = 0,
				.valid_group = VALID,
			};

			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			__atomic_store(tbl8_entry, &new_tbl8_entry,
					__ATOMIC_RELAXED);

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static void
tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};

	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
			__ATOMIC_RELAXED);
}
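
/*
 * Note (added for clarity): tbl8_alloc() keys a group's availability off
 * the valid_group flag of the group's first entry, so tbl8_free() only
 * needs to clear that first entry to make the whole group available for
 * reallocation.
 */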

static __rte_noinline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					__atomic_store(&lpm->tbl8[j],
						&new_tbl8_entry,
						__ATOMIC_RELAXED);

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

static __rte_noinline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = lpm->tbl24[tbl24_index].depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = lpm->tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
				.group_idx = tbl8_group_index,
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} else { /*
		* If it is valid, extended entry calculate the index into tbl8.
		*/
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}
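
/*
 * Summary (added for clarity): add_depth_big() covers three tbl24 states
 * for the target /24 entry: invalid (allocate a fresh tbl8 group), valid
 * but not extended (allocate a tbl8 group, seed it with the existing tbl24
 * entry, then overlay the new rule) and already extended (update the
 * existing tbl8 group in place). In the first two cases the tbl24 entry is
 * published last with release semantics, so readers never see a
 * half-initialised tbl8 group.
 */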

/*
 * Add a route
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

#if 0
	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* Skip table entries update if the rule is the same as
	 * the rule in the rules table.
	 */
	if (rule_index == -EEXIST)
		return 0;

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}
#endif

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			//rule_delete(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
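
#if 0
/*
 * Illustrative usage sketch (not part of the original source): create a
 * table, install a /24 route and look an address up. It assumes the inline
 * rte_lpm_lookup() from rte_lpm.h; the config field values are arbitrary.
 */
static void
rte_lpm_example(void)
{
	struct rte_lpm_config config = {
		.max_rules = 256,
		.number_tbl8s = 256,
	};
	struct rte_lpm *lpm;
	uint32_t next_hop;

	lpm = rte_lpm_create("example", -1, &config);
	if (lpm == NULL)
		return;

	/* 192.0.2.0/24 -> next hop id 1 (IPs are in host byte order). */
	if (rte_lpm_add(lpm, 0xC0000200, 24, 1) == 0 &&
	    rte_lpm_lookup(lpm, 0xC0000201, &next_hop) == 0) {
		/* next_hop is now 1. */
	}

	rte_lpm_free(lpm);
}
#endif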

#if 0
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}

static int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}
#endif

static int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check sub_rule_nhop. A value of 0 indicates that there is
	 * no replacement rule, a non-zero value gives the next hop of the
	 * replacement rule.
	 */
	if (sub_rule_nhop == 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = sub_rule_nhop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = sub_rule_nhop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						__atomic_store(&lpm->tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
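
/*
 * Example (added for clarity, extrapolating from the checks above): after
 * the last depth > 24 rule in a group is deleted, every entry may carry
 * the same depth <= 24 value inherited from a covering route; in that case
 * the group start index is returned so the caller can fold that value back
 * into the single owning tbl24 entry and free the group.
 */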
943537d1343SAlexander V. Chernikov 
static int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);
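
	/*
	 * Worked example (illustrative): for a rule 10.1.2.192/26, ip_masked
	 * is 0x0a0102c0, so tbl24_index = 0x0a0102 (the covering /24); the
	 * low byte 0xc0 selects entry 192 within the tbl8 group, and
	 * depth_to_range(26) = 1 << (32 - 26) = 64 tbl8 entries are covered.
	 */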

	if (sub_rule_nhop == 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = sub_rule_nhop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
		}
	}
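
	/*
	 * Each tbl8 entry fits in a single 32-bit word, so the
	 * __atomic_store() above publishes the replacement depth and next
	 * hop in one shot; lock-free readers observe either the old or the
	 * new entry, never a torn mix of the two.
	 */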

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid a race condition.
		 * The release fence prevents the free of the tbl8 group from
		 * being hoisted above the tbl24 invalidation.
		 */
		lpm->tbl24[tbl24_index].valid = 0;
		__atomic_thread_fence(__ATOMIC_RELEASE);
		tbl8_free(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid a race condition.
		 * The release fence prevents the free of the tbl8 group from
		 * being hoisted above the tbl24 update.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_RELEASE);
		tbl8_free(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}

/*
 * Deletes a rule
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
	uint8_t sub_rule_depth, uint32_t sub_rule_nhop)
{
	//int32_t rule_to_delete_index;
	uint32_t ip_masked;
	//uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: ip is an unsigned 32-bit integer, so
	 * every possible value is a valid address and need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

#if 0
	/*
	 * Find the index of the input rule (the rule to delete) in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index, depth);
#endif

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	//sub_rule_depth = *psub_rule_depth;
	//sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth is at most MAX_DEPTH_TBL24 use
	 * delete_depth_small, otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(lpm, ip_masked, depth,
				sub_rule_nhop, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(lpm, ip_masked, depth, sub_rule_nhop,
				sub_rule_depth);
	}
}
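
/*
 * Illustrative usage sketch (values are hypothetical): delete 10.1.2.0/26
 * and fall back to a previously added covering 10.0.0.0/8 whose next hop
 * is 7. In this port the caller supplies the covering rule itself, since
 * the internal rule table is compiled out above.
 *
 *	uint32_t ip = 0x0a010200;	10.1.2.0 in host byte order
 *	int ret = rte_lpm_delete(lpm, ip, 26, 8, 7);
 *	if (ret < 0)
 *		handle the error, e.g. an invalid depth
 */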

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}