/*	$NetBSD: uvm_page_array.c,v 1.9 2020/05/26 21:52:12 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.9 2020/05/26 21:52:12 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
    unsigned int flags)
{

	ar->ar_idx = 0;
	ar->ar_npages = 0;
	ar->ar_uobj = uobj;
	ar->ar_flags = flags;
}

/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison the array to trigger the KASSERT in uvm_page_array_peek
	 * and catch use-after-fini errors.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx = 0;
	ar->ar_npages = 0;
}

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}

/*
 * uvm_page_array_fill: look up pages and keep them cached.
 *
 * return 0 on success.  in that case, the results are cached in the array
 * so that they will be picked up by later uvm_page_array_peek calls.
 *
 * nwant is the number of pages to fetch.  the caller should consider it
 * a hint; nwant == 0 means the caller has no particular preference.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, voff_t off, unsigned int nwant)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	struct uvm_object *uobj = ar->ar_uobj;
	const int flags = ar->ar_flags;
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;
	int error = 0;

	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
#if 0 /* called from DDB for "show obj/f" without lock */
	KASSERT(rw_lock_held(uobj->vmobjlock));
#endif
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0) {
		unsigned int tagmask = UVM_PAGE_DIRTY_TAG;

		if ((flags & UVM_PAGE_ARRAY_FILL_WRITEBACK) != 0) {
			tagmask |= UVM_PAGE_WRITEBACK_TAG;
		}
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, tagmask);
	} else {
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		if (flags != 0) {
			/*
			 * if dense or looking for tagged entries (or
			 * working backwards), fail right away.
			 */
			npages = 0;
		} else {
			/*
			 * there's nothing else to be found with the current
			 * set of arguments, in the current version of the
			 * tree.
			 *
			 * minimize repeated tree lookups by "finding" a
			 * null pointer, in case the caller keeps looping (a
			 * common use case).
			 */
			npages = 1;
			ar->ar_pages[0] = NULL;
		}
		error = ENOENT;
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; error == 0 && i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		KASSERT(pg != NULL);
		KDASSERT(pg->uobject == uobj);
		if (backward) {
			KDASSERT(pg->offset <= off);
			KDASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KDASSERT(pg->offset >= off);
			KDASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return error;
}

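/*
 * Example (illustrative sketch, not part of this file's code): fetch up
 * to 16 dirty pages at or after "off" and consume them with peek/advance.
 * "uobj" and "off" stand for the caller's object and start offset, and the
 * caller is assumed to hold uobj->vmobjlock across the whole sequence.
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *
 *	uvm_page_array_init(&a, uobj, UVM_PAGE_ARRAY_FILL_DIRTY);
 *	if (uvm_page_array_fill(&a, off, 16) == 0) {
 *		while ((pg = uvm_page_array_peek(&a)) != NULL) {
 *			... process the dirty page pg ...
 *			uvm_page_array_advance(&a);
 *		}
 *	}
 *	uvm_page_array_fini(&a);
 */
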
/*
 * uvm_page_array_fill_and_peek:
 * same as uvm_page_array_peek except that, if the array is empty, try to fill
 * it first.
 */

struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *ar, voff_t off,
    unsigned int nwant)
{
	int error;

	if (ar->ar_idx != ar->ar_npages) {
		return ar->ar_pages[ar->ar_idx];
	}
	error = uvm_page_array_fill(ar, off, nwant);
	if (error != 0) {
		return NULL;
	}
	return uvm_page_array_peek(ar);
}
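
/*
 * Example (illustrative sketch, not part of this file's code): walk an
 * object's pages starting at "startoff" using fill_and_peek, which refills
 * the cache transparently.  "uobj" and "startoff" are assumed to be
 * provided by the caller, who must hold uobj->vmobjlock.  the loop ends
 * when uvm_page_array_fill_and_peek returns NULL, i.e. when no further
 * pages are found.
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *	voff_t off = startoff;
 *
 *	uvm_page_array_init(&a, uobj, 0);
 *	while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
 *		... process the page pg ...
 *		off = pg->offset + PAGE_SIZE;
 *		uvm_page_array_advance(&a);
 *	}
 *	uvm_page_array_fini(&a);
 */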