/*
 * Copyright 2016 Jakub Klama <jceel@FreeBSD.org>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/queue.h>
#include "lib9p_impl.h"
#include "hashtable.h"

static struct ht_item *ht_iter_advance(struct ht_iter *, struct ht_item *);

/*
 * Initialize a hash table with the given number of buckets.
 * Each bucket holds a tail queue of items and the whole table
 * is protected by a reader-writer lock.
 */
void
ht_init(struct ht *h, ssize_t size)
{
	ssize_t i;

	memset(h, 0, sizeof(struct ht));
	h->ht_nentries = size;
	h->ht_entries = l9p_calloc((size_t)size, sizeof(struct ht_entry));
	pthread_rwlock_init(&h->ht_rwlock, NULL);

	for (i = 0; i < size; i++)
		TAILQ_INIT(&h->ht_entries[i].hte_items);
}

/*
 * Release all items and the bucket array.  The data pointers
 * stored in the items are not freed, and neither is the ht
 * structure itself.
 */
void
ht_destroy(struct ht *h)
{
	struct ht_entry *he;
	struct ht_item *item, *tmp;
	ssize_t i;

	for (i = 0; i < h->ht_nentries; i++) {
		he = &h->ht_entries[i];
		TAILQ_FOREACH_SAFE(item, &he->hte_items, hti_link, tmp) {
			free(item);
		}
	}

	pthread_rwlock_destroy(&h->ht_rwlock);
	free(h->ht_entries);
	h->ht_entries = NULL;
}

/*
 * Look up the data stored under the given hash, taking the
 * read lock for the duration of the lookup.
 */
void *
ht_find(struct ht *h, uint32_t hash)
{
	void *result;

	ht_rdlock(h);
	result = ht_find_locked(h, hash);
	ht_unlock(h);
	return (result);
}

/*
 * Look up the data stored under the given hash.  The caller
 * must already hold the table lock.
 */
void *
ht_find_locked(struct ht *h, uint32_t hash)
{
	struct ht_entry *entry;
	struct ht_item *item;

	entry = &h->ht_entries[hash % h->ht_nentries];

	TAILQ_FOREACH(item, &entry->hte_items, hti_link) {
		if (item->hti_hash == hash)
			return (item->hti_data);
	}

	return (NULL);
}

/*
 * Insert a value under the given hash, taking the write lock.
 * The hash acts as a unique key: if an item with the same hash
 * already exists, fail with EEXIST.
 */
int
ht_add(struct ht *h, uint32_t hash, void *value)
{
	struct ht_entry *entry;
	struct ht_item *item;

	ht_wrlock(h);
	entry = &h->ht_entries[hash % h->ht_nentries];

	TAILQ_FOREACH(item, &entry->hte_items, hti_link) {
		if (item->hti_hash == hash) {
			errno = EEXIST;
			ht_unlock(h);
			return (-1);
		}
	}

	item = l9p_calloc(1, sizeof(struct ht_item));
	item->hti_hash = hash;
	item->hti_data = value;
	TAILQ_INSERT_TAIL(&entry->hte_items, item, hti_link);
	ht_unlock(h);

	return (0);
}

/*
 * Remove the item stored under the given hash, taking the
 * write lock.
 */
int
ht_remove(struct ht *h, uint32_t hash)
{
	int result;

	ht_wrlock(h);
	result = ht_remove_locked(h, hash);
	ht_unlock(h);
	return (result);
}

/*
 * Remove and free the item stored under the given hash; the data
 * it points to is not freed.  The caller must already hold the
 * write lock.  Fails with ENOENT if the hash is not present.
 */
int
ht_remove_locked(struct ht *h, uint32_t hash)
{
	struct ht_entry *entry;
	struct ht_item *item, *tmp;
	ssize_t slot = hash % h->ht_nentries;

	entry = &h->ht_entries[slot];

	TAILQ_FOREACH_SAFE(item, &entry->hte_items, hti_link, tmp) {
		if (item->hti_hash == hash) {
			TAILQ_REMOVE(&entry->hte_items, item, hti_link);
			free(item);
			return (0);
		}
	}

	errno = ENOENT;
	return (-1);
}

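/*
 * A minimal usage sketch of the interface above (not part of the
 * library; "fid_table", "fid" and "file" are hypothetical caller
 * names, and error handling is abbreviated):
 *
 *	struct ht fid_table;
 *	void *file;
 *
 *	ht_init(&fid_table, 100);
 *	if (ht_add(&fid_table, fid, file) != 0)
 *		...;		// duplicate hash, errno == EEXIST
 *	file = ht_find(&fid_table, fid);
 *	if (ht_remove(&fid_table, fid) != 0)
 *		...;		// no such hash, errno == ENOENT
 *	ht_destroy(&fid_table);
 */
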
/*
 * Inner workings for advancing the iterator.
 *
 * If we have a current item, that tells us how to find the
 * next item.  If not, we get the first item from the next
 * slot (well, the next slot with an item); in any case, we
 * record the new slot and return the next item.
 *
 * For bootstrapping, iter->htit_slot can be -1 to start
 * searching at slot 0.
 *
 * Caller must hold a lock on the table.
 */
static struct ht_item *
ht_iter_advance(struct ht_iter *iter, struct ht_item *cur)
{
	struct ht_item *next;
	struct ht *h;
	ssize_t slot;

	h = iter->htit_parent;

	if (cur == NULL)
		next = NULL;
	else
		next = TAILQ_NEXT(cur, hti_link);

	if (next == NULL) {
		slot = iter->htit_slot;
		while (++slot < h->ht_nentries) {
			next = TAILQ_FIRST(&h->ht_entries[slot].hte_items);
			if (next != NULL)
				break;
		}
		iter->htit_slot = slot;
	}
	return (next);
}

/*
 * Remove the current item - there must be one, or this is an
 * error.  This (necessarily) pre-locates the next item, so callers
 * must not use it on an actively-changing table.
 */
int
ht_remove_at_iter(struct ht_iter *iter)
{
	struct ht_item *item;
	struct ht *h;
	ssize_t slot;

	assert(iter != NULL);

	if ((item = iter->htit_curr) == NULL) {
		errno = EINVAL;
		return (-1);
	}

	/* remove the item from the table, saving the NEXT one */
	h = iter->htit_parent;
	ht_wrlock(h);
	slot = iter->htit_slot;
	iter->htit_next = ht_iter_advance(iter, item);
	TAILQ_REMOVE(&h->ht_entries[slot].hte_items, item, hti_link);
	ht_unlock(h);

	/* mark us as no longer on an item, then free it */
	iter->htit_curr = NULL;
	free(item);

	return (0);
}

/*
 * Initialize iterator.  Subsequent ht_next calls will find the
 * first item, then the next, and so on.  Callers should in general
 * not use this on actively-changing tables, though we do our best
 * to make it semi-sensible.
 */
void
ht_iter(struct ht *h, struct ht_iter *iter)
{

	iter->htit_parent = h;
	iter->htit_curr = NULL;
	iter->htit_next = NULL;
	iter->htit_slot = -1;	/* which will increment to 0 */
}

/*
 * Return the next item, which is the first item if we have not
 * yet been called on this iterator, or the next item if we have.
 */
void *
ht_next(struct ht_iter *iter)
{
	struct ht_item *item;
	struct ht *h;

	if ((item = iter->htit_next) == NULL) {
		/* no pre-loaded next; find next from current */
		h = iter->htit_parent;
		ht_rdlock(h);
		item = ht_iter_advance(iter, iter->htit_curr);
		ht_unlock(h);
	} else
		iter->htit_next = NULL;
	iter->htit_curr = item;
	return (item == NULL ? NULL : item->hti_data);
}
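
/*
 * A usage sketch for the iterator interface above (not part of the
 * library; "table" and "is_stale" are hypothetical caller names):
 *
 *	struct ht_iter it;
 *	void *data;
 *
 *	ht_iter(&table, &it);
 *	while ((data = ht_next(&it)) != NULL) {
 *		if (is_stale(data))
 *			ht_remove_at_iter(&it);
 *	}
 *
 * ht_next() yields the stored data pointers in bucket order;
 * ht_remove_at_iter() drops the item the iterator is currently
 * positioned on without disturbing the rest of the walk.
 */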
268