/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _VM_RADIX_H_
#define _VM_RADIX_H_

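/*
 * _vm_radix.h provides the struct vm_radix definition, which wraps the
 * underlying struct pctrie used to index vm_page structures by pindex.
 */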
#include <vm/_vm_radix.h>

#ifdef _KERNEL
#include <sys/pctrie.h>
#include <vm/vm_page.h>
#include <vm/vm.h>

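/*
 * Support routines, implemented in vm_radix.c, for allocating and freeing
 * trie nodes, and the SMR state used by lockless lookups.
 */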
void		vm_radix_wait(void);
void		vm_radix_zinit(void);
void		*vm_radix_node_alloc(struct pctrie *ptree);
void		vm_radix_node_free(struct pctrie *ptree, void *node);
extern smr_t	vm_radix_smr;

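/*
 * Initialize the trie rooted at the given vm_radix.
 */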
static __inline void
vm_radix_init(struct vm_radix *rtree)
{
	pctrie_init(&rtree->rt_trie);
}

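/*
 * Returns true if the trie contains no pages.
 */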
static __inline bool
vm_radix_is_empty(struct vm_radix *rtree)
{
	return (pctrie_is_empty(&rtree->rt_trie));
}

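/*
 * Instantiate the SMR-capable pctrie operations for vm_page, keyed by
 * pindex.  This generates the VM_RADIX_PCTRIE_*() functions (INSERT,
 * LOOKUP, LOOKUP_UNLOCKED, LOOKUP_GE, LOOKUP_LE, REMOVE_LOOKUP, RECLAIM
 * and REPLACE) wrapped by the inline functions below.
 */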
PCTRIE_DEFINE_SMR(VM_RADIX, vm_page, pindex, vm_radix_node_alloc,
    vm_radix_node_free, vm_radix_smr);

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.  Otherwise returns 0 on success, or
 * ENOMEM if a new trie node cannot be allocated.
 */
static __inline int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	return (VM_RADIX_PCTRIE_INSERT(&rtree->rt_trie, page));
}

/*
 * Returns the value stored at the index, assuming access is externally
 * synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP(&rtree->rt_trie, index));
}

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_UNLOCKED(&rtree->rt_trie, index));
}

/*
 * Returns the page with the least pindex that is greater than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_GE(&rtree->rt_trie, index));
}

/*
 * Returns the page with the greatest pindex that is less than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_LE(&rtree->rt_trie, index));
}

/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
static __inline vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_REMOVE_LOOKUP(&rtree->rt_trie, index));
}

/*
 * Remove and free all the nodes from the radix tree.
 */
static __inline void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	VM_RADIX_PCTRIE_RECLAIM(&rtree->rt_trie);
}

/*
 * Replace an existing page in the trie with another one, and return the
 * replaced page.
 * Panics if there is not an old page in the trie at the new page's index.
 */
static __inline vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	return (VM_RADIX_PCTRIE_REPLACE(&rtree->rt_trie, newpage));
}

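/*
 * Example usage (an illustrative sketch only, not part of this interface):
 * a caller such as the VM object code typically modifies the trie while
 * holding the owning object's lock, and may use the SMR-protected lookup
 * without it, e.g.:
 *
 *	VM_OBJECT_WLOCK(object);
 *	if (vm_radix_insert(&object->rtree, m) != 0)
 *		... handle allocation failure ...
 *	VM_OBJECT_WUNLOCK(object);
 *
 *	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
 */
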
#endif /* _KERNEL */
#endif /* !_VM_RADIX_H_ */