/*	$OpenBSD: uvm_object.c,v 1.15 2019/11/29 22:10:04 beck Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/* Number of pages to fetch per batch. */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_objinit: initialise a uvm object.
 */
void
uvm_objinit(struct uvm_object *uobj, struct uvm_pagerops *pgops, int refs)
{
	uobj->pgops = pgops;
	RBT_INIT(uvm_objtree, &uobj->memt);
	uobj->uo_npages = 0;
	uobj->uo_refs = refs;
}
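
/*
 * Example (illustrative sketch, not part of this file): a pager
 * normally embeds a uvm_object in its own structure and initialises
 * it with its pager operations and a single reference.  The names
 * below are hypothetical; real pagers supply their own struct
 * uvm_pagerops (e.g. uvm_vnodeops for vnodes).
 *
 *	struct my_pager {
 *		struct uvm_object u_obj;
 *	} mp;
 *
 *	uvm_objinit(&mp.u_obj, &my_pagerops, 1);
 */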

#ifndef SMALL_KERNEL
/*
 * uvm_objwire: wire the pages of an entire uobj.
 *
 * => caller must pass page-aligned start and end values
 * => if the caller passes in a pageq pointer, we'll return a list of
 *  wired pages.
 */

int
uvm_objwire(struct uvm_object *uobj, voff_t start, voff_t end,
    struct pglist *pageq)
{
	int i, npages, left, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	voff_t offset = start;

	left = (end - start) >> PAGE_SHIFT;

	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				/* drop swap slot by object page index */
				uao_dropswap(uobj,
				    pgs[i]->offset >> PAGE_SHIFT);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (voff_t)npages << PAGE_SHIFT;
	}

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_objunwire(uobj, start, offset);

	return error;
}
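
/*
 * Example (illustrative sketch, not part of this file): wire the
 * first four pages of an anonymous-memory object and collect them on
 * a local list.  The object and range are hypothetical; uao_create()
 * is the real allocator for anonymous uvm objects.
 *
 *	struct uvm_object *uobj = uao_create(4 * PAGE_SIZE, 0);
 *	struct pglist pgl;
 *	int error;
 *
 *	TAILQ_INIT(&pgl);
 *	error = uvm_objwire(uobj, 0, 4 * PAGE_SIZE, &pgl);
 *	if (error)
 *		return error;	/* nothing remains wired on failure */
 */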

/*
 * uvm_objunwire: unwire the pages of an entire uobj.
 *
 * => caller must pass page-aligned start and end values
 */

void
uvm_objunwire(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct vm_page *pg;
	voff_t offset;

	uvm_lock_pageq();
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->pg_flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	uvm_unlock_pageq();
}
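
/*
 * Continuing the sketch above: when the wired range is no longer
 * needed, the caller passes the same page-aligned bounds back to
 * drop the wiring.
 *
 *	uvm_objunwire(uobj, 0, 4 * PAGE_SIZE);
 */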
#endif /* !SMALL_KERNEL */

/*
 * uvm_objfree: free all pages in a uvm object, used by the buffer
 * cache to free all pages attached to a buffer.
 */
void
uvm_objfree(struct uvm_object *uobj)
{
	struct vm_page *pg;
	struct pglist pgl;

	TAILQ_INIT(&pgl);
	/*
	 * Extract from rb tree in offset order. The phys addresses
	 * usually increase in that order, which is better for
	 * uvm_pmr_freepageq.
	 */
	RBT_FOREACH(pg, uvm_objtree, &uobj->memt) {
		/*
		 * clear PG_TABLED so we don't do work to remove
		 * this pg from the uobj we are throwing away
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
		uvm_pageclean(pg);
		TAILQ_INSERT_TAIL(&pgl, pg, pageq);
	}
	uvm_pmr_freepageq(&pgl);
}
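
/*
 * Example (illustrative sketch, not part of this file): the buffer
 * cache frees every page attached to a buffer's object in one call
 * once it is done with the buffer.  "bp->b_pobj" is assumed here as
 * the buffer's page-holding object; treat it as a sketch rather than
 * the exact call site.
 *
 *	uvm_objfree(bp->b_pobj);
 */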