/*	$OpenBSD: uvm_object.c,v 1.18 2020/11/24 13:49:09 mpi Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on UVM memory objects.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/* Number of pages to fetch per pgo_get() call in uvm_objwire(). */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_objinit: initialise a uvm object.
 */
void
uvm_objinit(struct uvm_object *uobj, const struct uvm_pagerops *pgops, int refs)
{
	uobj->pgops = pgops;
	RBT_INIT(uvm_objtree, &uobj->memt);
	uobj->uo_npages = 0;
	uobj->uo_refs = refs;
}

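/*
 * Example (illustrative sketch, not part of this file): a driver that
 * embeds a uvm_object in its softc could set it up as below.
 * "mydev_pagerops" and "mydev_softc" are hypothetical names; the
 * initial reference count of 1 is the reference held by the driver
 * itself.
 */
#if 0
extern const struct uvm_pagerops mydev_pagerops;	/* hypothetical pager */

struct mydev_softc {
	struct uvm_object sc_uobj;	/* pages backing the device */
};

void
mydev_attach(struct mydev_softc *sc)
{
	/* empty page tree, one reference owned by the driver */
	uvm_objinit(&sc->sc_uobj, &mydev_pagerops, 1);
}
#endif
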
#ifndef SMALL_KERNEL
/*
 * uvm_objwire: wire the pages of a uobj in the range [start, end)
 *
 * => caller must pass page-aligned start and end values
 * => if the caller passes in a pageq pointer, we'll return a list of
 *  wired pages.
 */

int
uvm_objwire(struct uvm_object *uobj, voff_t start, voff_t end,
    struct pglist *pageq)
{
	int i, npages, left, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	voff_t offset = start;

	left = (end - start) >> PAGE_SHIFT;

	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				/*
				 * Drop the swap slot for the page's own
				 * offset in the object; `i' is only the
				 * index within the current batch.
				 */
				uao_dropswap(uobj,
				    pgs[i]->offset >> PAGE_SHIFT);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (voff_t)npages << PAGE_SHIFT;
	}

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_objunwire(uobj, start, offset);

	return error;
}
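
/*
 * Example (illustrative sketch, not part of this file): wire a
 * page-aligned range of an object and collect the wired pages on a
 * caller-supplied list.  "example_wire_range" is a hypothetical
 * helper; on failure uvm_objwire() has already unwired whatever part
 * of the range it managed to wire, so there is nothing to undo here.
 */
#if 0
int
example_wire_range(struct uvm_object *uobj, voff_t len,
    struct pglist *pageq)
{
	TAILQ_INIT(pageq);

	/* uvm_objwire() requires page-aligned start and end values */
	return uvm_objwire(uobj, 0, round_page(len), pageq);
}
#endif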

/*
 * uvm_objunwire: unwire the pages of a uobj in the range [start, end)
 *
 * => caller must pass page-aligned start and end values
 */

void
uvm_objunwire(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct vm_page *pg;
	voff_t offset;

	uvm_lock_pageq();
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->pg_flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	uvm_unlock_pageq();
}
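
/*
 * Example (illustrative sketch): the matching teardown for the sketch
 * above; the same page-aligned range handed to uvm_objwire() is
 * eventually released with uvm_objunwire().  "example_unwire_range"
 * is a hypothetical helper.
 */
#if 0
void
example_unwire_range(struct uvm_object *uobj, voff_t len)
{
	uvm_objunwire(uobj, 0, round_page(len));
}
#endif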
#endif /* !SMALL_KERNEL */

/*
 * uvm_objfree: free all pages in a uvm object, used by the buffer
 * cache to free all pages attached to a buffer.
 */
void
uvm_objfree(struct uvm_object *uobj)
{
	struct vm_page *pg;
	struct pglist pgl;

	TAILQ_INIT(&pgl);
	/*
	 * Extract from rb tree in offset order. The phys addresses
	 * usually increase in that order, which is better for
	 * uvm_pmr_freepageq.
	 */
	RBT_FOREACH(pg, uvm_objtree, &uobj->memt) {
		/*
		 * clear PG_TABLED so we don't do work to remove
		 * this pg from the uobj we are throwing away
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
		uvm_lock_pageq();
		uvm_pageclean(pg);
		uvm_unlock_pageq();
		TAILQ_INSERT_TAIL(&pgl, pg, pageq);
	}
	uvm_pmr_freepageq(&pgl);
}
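
/*
 * Example (illustrative sketch, not part of this file): populate an
 * object with zeroed pages, then release them all in one sweep, much
 * as the buffer cache does for a buffer's pages.
 * "example_populate_and_free" is a hypothetical helper; the unbusying
 * step shown is an assumption about how a caller would settle the
 * pages before freeing them.
 */
#if 0
void
example_populate_and_free(struct uvm_object *uobj, int npages)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npages; i++) {
		pg = uvm_pagealloc(uobj, (voff_t)i << PAGE_SHIFT, NULL,
		    UVM_PGA_ZERO);
		if (pg == NULL)
			break;	/* a real caller would wait or bail out */
		/* uvm_pagealloc() returns the page busy; settle it */
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
	}

	/* hand every page attached to uobj back to the free lists */
	uvm_objfree(uobj);
}
#endif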