/*	$OpenBSD: uvm_object.c,v 1.25 2022/02/21 16:08:36 kn Exp $	*/

/*
 * Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/atomic.h>
#include <sys/rwlock.h>

#include <uvm/uvm.h>

/* Dummy object used by some pmaps for sanity checks. */
const struct uvm_pagerops pmap_pager = {
	/* nothing */
};

/* Dummy object used by the buffer cache for sanity checks. */
const struct uvm_pagerops bufcache_pager = {
	/* nothing */
};

/* Page count to fetch per single step. */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_obj_init: initialize UVM memory object.
 */
void
uvm_obj_init(struct uvm_object *uobj, const struct uvm_pagerops *pgops, int refs)
{
	int alock;

	alock = ((pgops != NULL) && (pgops != &pmap_pager) &&
	    (pgops != &bufcache_pager) && (refs != UVM_OBJ_KERN));

	if (alock) {
		/* Allocate and assign a lock. */
		rw_obj_alloc(&uobj->vmobjlock, "uobjlk");
	} else {
		/* The lock will need to be set via uvm_obj_setlock(). */
		uobj->vmobjlock = NULL;
	}
	uobj->pgops = pgops;
	RBT_INIT(uvm_objtree, &uobj->memt);
	uobj->uo_npages = 0;
	uobj->uo_refs = refs;
}

/*
 * uvm_obj_destroy: destroy UVM memory object.
 */
void
uvm_obj_destroy(struct uvm_object *uo)
{
	KASSERT(RBT_EMPTY(uvm_objtree, &uo->memt));

	rw_obj_free(uo->vmobjlock);
}
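
/*
 * Usage sketch (illustrative only): the typical lifecycle of an object
 * embedded in a driver's softc.  "example_softc", "example_pager" and
 * the attach/detach routines are hypothetical names, not part of UVM.
 */
#if 0
struct example_softc {
	struct uvm_object eo_obj;		/* embedded UVM object */
};

const struct uvm_pagerops example_pager = {
	/* pgo_get, pgo_put, ... would go here */
};

void
example_attach(struct example_softc *sc)
{
	/*
	 * One reference for the creator; since the pager is neither
	 * pmap_pager nor bufcache_pager and refs != UVM_OBJ_KERN,
	 * uvm_obj_init() allocates a vmobjlock for us.
	 */
	uvm_obj_init(&sc->eo_obj, &example_pager, 1);
}

void
example_detach(struct example_softc *sc)
{
	/* All pages must already be gone from the object's tree. */
	uvm_obj_destroy(&sc->eo_obj);
}
#endif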

/*
 * uvm_obj_setlock: assign a vmobjlock to the UVM object.
 *
 * => Caller is responsible for ensuring that the UVM object is not in use.
 * => Only a dynamic lock may be previously set; its reference is then dropped.
 */
void
uvm_obj_setlock(struct uvm_object *uo, struct rwlock *lockptr)
{
	struct rwlock *olockptr = uo->vmobjlock;

	if (olockptr) {
		/* Drop the reference on the old lock. */
		rw_obj_free(olockptr);
	}
	if (lockptr == NULL) {
		/* If no new lock is passed, allocate the default one. */
		rw_obj_alloc(&lockptr, "uobjlk");
	}
	uo->vmobjlock = lockptr;
}
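
/*
 * Usage sketch (illustrative only): uvm_obj_setlock() allows two objects
 * to share a single vmobjlock, so that locking one locks both.  This
 * assumes the rw_obj_hold() interface from <sys/rwlock.h>; the function
 * and variable names below are hypothetical.
 */
#if 0
void
example_share_lock(struct uvm_object *a, struct uvm_object *b)
{
	struct rwlock *shared = NULL;

	rw_obj_alloc(&shared, "uobjlk");	/* reference count: 1 */
	rw_obj_hold(shared);			/* reference count: 2 */

	/*
	 * Each call drops the object's old lock and consumes one of the
	 * two references; uvm_obj_setlock() takes none of its own.
	 */
	uvm_obj_setlock(a, shared);
	uvm_obj_setlock(b, shared);
}
#endif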

#ifndef SMALL_KERNEL
/*
 * uvm_obj_wire: wire the pages of an entire UVM object.
 *
 * => NOTE: this function should only be used for types of objects
 *    where the PG_RELEASED flag is never set (aobj objects)
 * => caller must pass page-aligned start and end values
 * => if the caller passes in a pageq pointer, we'll return a list of
 *    wired pages.
 */

int
uvm_obj_wire(struct uvm_object *uobj, voff_t start, voff_t end,
    struct pglist *pageq)
{
	int i, npages, left, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	voff_t offset = start;

	left = (end - start) >> PAGE_SHIFT;

	rw_enter(uobj->vmobjlock, RW_WRITE | RW_DUPOK);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages (pgo_get() returns with the lock released). */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		rw_enter(uobj->vmobjlock, RW_WRITE | RW_DUPOK);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (voff_t)npages << PAGE_SHIFT;
	}
	rw_exit(uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_obj_unwire(uobj, start, offset);

	return error;
}

/*
 * uvm_obj_unwire: unwire the pages of an entire UVM object.
 *
 * => caller must pass page-aligned start and end values
 */
void
uvm_obj_unwire(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct vm_page *pg;
	off_t offset;

	rw_enter(uobj->vmobjlock, RW_WRITE | RW_DUPOK);
	uvm_lock_pageq();
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->pg_flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	uvm_unlock_pageq();
	rw_exit(uobj->vmobjlock);
}
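
/*
 * Usage sketch (illustrative only): wire all pages of an anonymous
 * object, collect them on a local list and unwire them again later.
 * The 16-page size is arbitrary and "example_wire_aobj" is hypothetical.
 */
#if 0
int
example_wire_aobj(void)
{
	struct uvm_object *uao;
	struct pglist pageq;
	int error;

	uao = uao_create(16 * PAGE_SIZE, 0);
	TAILQ_INIT(&pageq);

	/* Start and end must be page-aligned; [start, end) is wired. */
	error = uvm_obj_wire(uao, 0, 16 * PAGE_SIZE, &pageq);
	if (error) {
		uao_detach(uao);
		return error;
	}

	/* ... use the wired pages on "pageq" ... */

	uvm_obj_unwire(uao, 0, 16 * PAGE_SIZE);
	uao_detach(uao);
	return 0;
}
#endif
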
#endif /* !SMALL_KERNEL */

/*
 * uvm_obj_free: free all pages in a uvm object, used by the buffer
 * cache to free all pages attached to a buffer.
 */
void
uvm_obj_free(struct uvm_object *uobj)
{
	struct vm_page *pg;
	struct pglist pgl;

	KASSERT(UVM_OBJ_IS_BUFCACHE(uobj));
	KERNEL_ASSERT_LOCKED();

	TAILQ_INIT(&pgl);
	/*
	 * Extract from rb tree in offset order.  The phys addresses
	 * usually increase in that order, which is better for
	 * uvm_pglistfree().
	 */
	RBT_FOREACH(pg, uvm_objtree, &uobj->memt) {
		/*
		 * clear PG_TABLED so we don't do work to remove
		 * this pg from the uobj we are throwing away
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
		uvm_lock_pageq();
		uvm_pageclean(pg);
		uvm_unlock_pageq();
		TAILQ_INSERT_TAIL(&pgl, pg, pageq);
	}
	uvm_pglistfree(&pgl);
}
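
/*
 * Usage sketch (illustrative only): a bufcache-style teardown frees
 * every page still attached to a buffer's object in one call.
 * "example_buf" and its embedded object are hypothetical stand-ins for
 * the real buffer cache structures.
 */
#if 0
struct example_buf {
	struct uvm_object eb_uobj;	/* initialized with &bufcache_pager */
};

void
example_buf_free_pages(struct example_buf *eb)
{
	/* uvm_obj_free() asserts a bufcache pager and the kernel lock. */
	KERNEL_ASSERT_LOCKED();
	uvm_obj_free(&eb->eb_uobj);
}
#endif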