/*	$NetBSD: tmpfs_mem.c,v 1.8 2014/06/13 11:57:48 pooka Exp $	*/

/*
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tmpfs memory allocation routines.
 * Implements memory usage accounting and limiting.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_mem.c,v 1.8 2014/06/13 11:57:48 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/pool.h>

#include <fs/tmpfs/tmpfs.h>

extern struct pool	tmpfs_dirent_pool;
extern struct pool	tmpfs_node_pool;

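/*
 * tmpfs_mntmem_init: initialise the per-mount memory accounting,
 * using the given memory usage limit.
 */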
void
tmpfs_mntmem_init(struct tmpfs_mount *mp, uint64_t memlimit)
{

	mutex_init(&mp->tm_acc_lock, MUTEX_DEFAULT, IPL_NONE);
	mp->tm_mem_limit = memlimit;
	mp->tm_bytes_used = 0;
}

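/*
 * tmpfs_mntmem_destroy: destroy the memory accounting; all accounted
 * memory must already have been released.
 */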
void
tmpfs_mntmem_destroy(struct tmpfs_mount *mp)
{

	KASSERT(mp->tm_bytes_used == 0);
	mutex_destroy(&mp->tm_acc_lock);
}

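/*
 * tmpfs_mntmem_set: set a new memory usage limit for the mount.
 *
 * => Returns EBUSY if the memory already in use does not fit under the
 *    new limit, otherwise 0.
 */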
int
tmpfs_mntmem_set(struct tmpfs_mount *mp, uint64_t memlimit)
{
	int error;

	mutex_enter(&mp->tm_acc_lock);
	if (round_page(mp->tm_bytes_used) >= memlimit)
		error = EBUSY;
	else {
		error = 0;
		mp->tm_mem_limit = memlimit;
	}
	mutex_exit(&mp->tm_acc_lock);
	return error;
}

/*
 * tmpfs_mem_info: return the number of available memory pages.
 *
 * => If 'total' is true, return the _total_ number of pages.
 * => Otherwise, return the number of _free_ memory pages.
 *
 * Remember to subtract uvmexp.freetarg from the returned value to avoid
 * excessive memory usage.
 */
size_t
tmpfs_mem_info(bool total)
{
	size_t size = 0;

	/* XXX: unlocked */
	size += uvmexp.swpgavail;
	if (!total) {
		size -= uvmexp.swpgonly;
	}
	size += uvmexp.free;
	size += uvmexp.filepages;
	if (size > uvmexp.wired) {
		size -= uvmexp.wired;
	} else {
		size = 0;
	}
	return size;
}

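/*
 * tmpfs_bytes_max: return the maximum number of bytes which can be
 * stored on the mount, i.e. the lesser of the mount's memory limit and
 * the memory currently used plus what is still available system-wide.
 */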
uint64_t
tmpfs_bytes_max(struct tmpfs_mount *mp)
{
	psize_t freepages = tmpfs_mem_info(false);
	uint64_t avail_mem;

	if (freepages < uvmexp.freetarg) {
		freepages = 0;
	} else {
		freepages -= uvmexp.freetarg;
	}
	avail_mem = round_page(mp->tm_bytes_used) + (freepages << PAGE_SHIFT);
	return MIN(mp->tm_mem_limit, avail_mem);
}

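/*
 * tmpfs_pages_avail: return the number of pages still available to
 * the mount.
 */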
size_t
tmpfs_pages_avail(struct tmpfs_mount *mp)
{

	return (tmpfs_bytes_max(mp) - mp->tm_bytes_used) >> PAGE_SHIFT;
}

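/*
 * tmpfs_mem_incr: increase the memory usage counter by 'sz' bytes.
 *
 * => Returns true on success; false if the limit would be exceeded.
 */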
bool
tmpfs_mem_incr(struct tmpfs_mount *mp, size_t sz)
{
	uint64_t lim;

	mutex_enter(&mp->tm_acc_lock);
	lim = tmpfs_bytes_max(mp);
	if (mp->tm_bytes_used + sz >= lim) {
		mutex_exit(&mp->tm_acc_lock);
		return false;
	}
	mp->tm_bytes_used += sz;
	mutex_exit(&mp->tm_acc_lock);
	return true;
}

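/*
 * tmpfs_mem_decr: decrease the memory usage counter by 'sz' bytes.
 */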
void
tmpfs_mem_decr(struct tmpfs_mount *mp, size_t sz)
{

	mutex_enter(&mp->tm_acc_lock);
	KASSERT(mp->tm_bytes_used >= sz);
	mp->tm_bytes_used -= sz;
	mutex_exit(&mp->tm_acc_lock);
}

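/*
 * tmpfs_dirent_get: allocate a directory entry, accounting its size.
 *
 * => Returns NULL if the memory limit has been reached.
 */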
struct tmpfs_dirent *
tmpfs_dirent_get(struct tmpfs_mount *mp)
{

	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_dirent))) {
		return NULL;
	}
	return pool_get(&tmpfs_dirent_pool, PR_WAITOK);
}

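/*
 * tmpfs_dirent_put: free a directory entry and release its accounted
 * memory.
 */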
void
tmpfs_dirent_put(struct tmpfs_mount *mp, struct tmpfs_dirent *de)
{

	tmpfs_mem_decr(mp, sizeof(struct tmpfs_dirent));
	pool_put(&tmpfs_dirent_pool, de);
}

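/*
 * tmpfs_node_get: allocate a tmpfs node, accounting both the node
 * count and the node size.
 *
 * => Returns NULL if the node or memory limit has been reached.
 */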
struct tmpfs_node *
tmpfs_node_get(struct tmpfs_mount *mp)
{

	if (atomic_inc_uint_nv(&mp->tm_nodes_cnt) >= mp->tm_nodes_max) {
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_node))) {
		/* Back out the node count taken above. */
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	return pool_get(&tmpfs_node_pool, PR_WAITOK);
}

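/*
 * tmpfs_node_put: free a tmpfs node, releasing the node count and the
 * accounted memory.
 */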
void
tmpfs_node_put(struct tmpfs_mount *mp, struct tmpfs_node *tn)
{

	atomic_dec_uint(&mp->tm_nodes_cnt);
	tmpfs_mem_decr(mp, sizeof(struct tmpfs_node));
	pool_put(&tmpfs_node_pool, tn);
}

/*
 * Quantum size to which tmpfs name lengths are rounded up, in order to
 * reduce re-allocations.
 */

#define	TMPFS_NAME_QUANTUM	(32)

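/*
 * tmpfs_strname_alloc: allocate a buffer for a name of 'len' bytes,
 * rounded up to TMPFS_NAME_QUANTUM and accounted against the mount.
 *
 * => Returns NULL if the memory limit has been reached.
 */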
char *
tmpfs_strname_alloc(struct tmpfs_mount *mp, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	if (!tmpfs_mem_incr(mp, sz)) {
		return NULL;
	}
	return kmem_alloc(sz, KM_SLEEP);
}

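/*
 * tmpfs_strname_free: free a name buffer of 'len' bytes previously
 * allocated with tmpfs_strname_alloc, releasing the accounted memory.
 */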
void
tmpfs_strname_free(struct tmpfs_mount *mp, char *str, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	tmpfs_mem_decr(mp, sz);
	kmem_free(str, sz);
}

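/*
 * tmpfs_strname_neqlen: compare two component names by their
 * quantum-rounded lengths and contents; returns true if they differ.
 */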
bool
tmpfs_strname_neqlen(struct componentname *fcnp, struct componentname *tcnp)
{
	const size_t fln = roundup2(fcnp->cn_namelen, TMPFS_NAME_QUANTUM);
	const size_t tln = roundup2(tcnp->cn_namelen, TMPFS_NAME_QUANTUM);

	return (fln != tln) || memcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fln);
}