/*	$OpenBSD: nfs_srvcache.c,v 1.32 2024/09/18 05:21:19 jsg Exp $	*/
/*	$NetBSD: nfs_srvcache.c,v 1.12 1996/02/18 11:53:49 fvdl Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *	of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *	pages 53-63. San Diego, February 1989.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <crypto/siphash.h>

#include <netinet/in.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nfs_var.h>

extern struct nfsstats nfsstats;
extern const int nfsv2_procid[NFS_NPROCS];
long numnfsrvcache, desirednfsrvcache = NFSRVCACHESIZ;

struct nfsrvcache *nfsrv_lookupcache(struct nfsrv_descript *);
void nfsrv_cleanentry(struct nfsrvcache *);

LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
SIPHASH_KEY nfsrvhashkey;
TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
u_long nfsrvhash;
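
/*
 * Requests are hashed on their xid with SipHash and a random key
 * (nfsrvhashkey), so a client cannot deliberately pile entries onto
 * a single hash chain.
 */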
#define NFSRCHASH(xid) \
	(&nfsrvhashtbl[SipHash24(&nfsrvhashkey, &(xid), sizeof(xid)) & nfsrvhash])

#define NETFAMILY(rp) \
	(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_UNSPEC)

/* Array that defines which nfs rpc's are nonidempotent */
static const int nonidempotent[NFS_NPROCS] = {
	0, 0, 1, 0, 0, 0, 0, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0
};
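
/*
 * nonidempotent[] is indexed by NFSv3 procedure number; the nonzero
 * slots above correspond to the state-modifying procedures (SETATTR,
 * WRITE, CREATE, MKDIR, SYMLINK, MKNOD, REMOVE, RMDIR, RENAME, LINK),
 * which must not be blindly re-executed on a retransmission.
 */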

/* True iff the rpc reply is an nfs status ONLY! */
static const int nfsv2_repstat[NFS_NPROCS] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 1, 1, 1, 1, 0, 1,
	0, 0
};
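
/*
 * Note that nfsv2_repstat[] is indexed by the NFSv2 procedure number
 * (hence only 18 initializers); nfsrv_updatecache() maps nd_procnum
 * through nfsv2_procid[] before consulting it.
 */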

void
nfsrv_cleanentry(struct nfsrvcache *rp)
{
	if ((rp->rc_flag & RC_REPMBUF) != 0)
		m_freem(rp->rc_reply);

	if ((rp->rc_flag & RC_NAM) != 0)
		m_free(rp->rc_nam);

	rp->rc_flag &= ~(RC_REPSTATUS|RC_REPMBUF);
}

/* Initialize the server request cache list */
void
nfsrv_initcache(void)
{

	nfsrvhashtbl = hashinit(desirednfsrvcache, M_NFSD, M_WAITOK, &nfsrvhash);
	arc4random_buf(&nfsrvhashkey, sizeof(nfsrvhashkey));
	TAILQ_INIT(&nfsrvlruhead);
}

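/*
 * Rough sketch of how the nfsd service loop (nfssvc_nfsd() in
 * nfs_syscalls.c) is expected to drive this cache; the names below are
 * approximate, not a verbatim copy of that code:
 *
 *	switch (nfsrv_getcache(nd, slp, &mreq)) {
 *	case RC_DOIT:
 *		error = (*nfsrv3_procs[nd->nd_procnum])(nd, slp, procp, &mreq);
 *		nfsrv_updatecache(nd, 1, mreq);	(reply in mreq is valid)
 *		break;
 *	case RC_REPLY:
 *		(send the previously generated reply returned in mreq)
 *		break;
 *	case RC_DROPIT:
 *		(free the request; send nothing)
 *		break;
 *	}
 */
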
/*
 * Look for the request in the cache.
 * If found then
 *	return the action and optionally a reply
 * else
 *	insert it in the cache
 *
 * The rules are as follows:
 * - if in progress, DROP the request
 * - if completed within DELAY of the current time, DROP it
 * - if completed a longer time ago, return REPLY if the reply was cached,
 *   otherwise return DOIT
 * Update/add the new request at the end of the LRU list.
 */
int
nfsrv_getcache(struct nfsrv_descript *nd, struct nfssvc_sock *slp,
    struct mbuf **repp)
{
	struct nfsrvhash *hash;
	struct nfsrvcache *rp;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	int ret;

	/*
	 * Don't cache recent requests for reliable transport protocols.
	 * (Maybe we should for the case of a reconnect, but..)
	 */
	if (!nd->nd_nam2)
		return (RC_DOIT);

	rp = nfsrv_lookupcache(nd);
	if (rp) {
		/* If not at end of LRU chain, move it there */
		if (TAILQ_NEXT(rp, rc_lru)) {
			TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
			TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
		}
		if (rp->rc_state == RC_UNUSED)
			panic("nfsrv cache");
		if (rp->rc_state == RC_INPROG) {
			nfsstats.srvcache_inproghits++;
			ret = RC_DROPIT;
		} else if (rp->rc_flag & RC_REPSTATUS) {
			nfsstats.srvcache_nonidemdonehits++;
			nfs_rephead(0, nd, slp, rp->rc_status, repp, &mb);
			ret = RC_REPLY;
		} else if (rp->rc_flag & RC_REPMBUF) {
			nfsstats.srvcache_nonidemdonehits++;
			*repp = m_copym(rp->rc_reply, 0, M_COPYALL, M_WAIT);
			ret = RC_REPLY;
		} else {
			nfsstats.srvcache_idemdonehits++;
			rp->rc_state = RC_INPROG;
			ret = RC_DOIT;
		}
		rp->rc_flag &= ~RC_LOCKED;
		if (rp->rc_flag & RC_WANTED) {
			rp->rc_flag &= ~RC_WANTED;
			wakeup(rp);
		}
		return (ret);
	}

	nfsstats.srvcache_misses++;
	if (numnfsrvcache < desirednfsrvcache) {
		rp = malloc(sizeof(*rp), M_NFSD, M_WAITOK|M_ZERO);
		numnfsrvcache++;
		rp->rc_flag = RC_LOCKED;
	} else {
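		/*
		 * The cache is full: recycle the least recently used
		 * entry.  If it is locked by another nfsd, mark it wanted
		 * and sleep, then re-fetch the LRU head since the list
		 * may have changed while we slept.
		 */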
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			tsleep_nsec(rp, PZERO-1, "nfsrc", INFSLP);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		nfsrv_cleanentry(rp);
		rp->rc_flag &= (RC_LOCKED | RC_WANTED);
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = mtod(nd->nd_nam, struct sockaddr_in *);
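	/*
	 * Record the client's address: an IPv4 address is stored in the
	 * entry itself (RC_INETADDR); anything else keeps a private copy
	 * of the sockaddr mbuf (RC_NAM).
	 */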
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = m_copym(nd->nd_nam, 0, M_COPYALL, M_WAIT);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
	hash = NFSRCHASH(nd->nd_retxid);
	LIST_INSERT_HEAD(hash, rp, rc_hash);
	rp->rc_flag &= ~RC_LOCKED;
	if (rp->rc_flag & RC_WANTED) {
		rp->rc_flag &= ~RC_WANTED;
		wakeup(rp);
	}
	return (RC_DOIT);
}

/* Update a request cache entry after the rpc has been done */
void
nfsrv_updatecache(struct nfsrv_descript *nd, int repvalid,
    struct mbuf *repmbuf)
{
	struct nfsrvcache *rp;

	if (!nd->nd_nam2)
		return;

	rp = nfsrv_lookupcache(nd);
	if (rp) {
		nfsrv_cleanentry(rp);
		rp->rc_state = RC_DONE;
		/*
		 * If we have a valid reply, update the status and save
		 * the reply for non-idempotent rpcs.
		 */
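		/*
		 * Saving the result lets a retransmission of a
		 * non-idempotent rpc (e.g. a CREATE or REMOVE that already
		 * succeeded) get the original answer back instead of an
		 * error from running it a second time.
		 */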
		if (repvalid && nonidempotent[nd->nd_procnum]) {
			if ((nd->nd_flag & ND_NFSV3) == 0 &&
			    nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
				rp->rc_status = nd->nd_repstat;
				rp->rc_flag |= RC_REPSTATUS;
			} else {
				rp->rc_reply = m_copym(repmbuf, 0, M_COPYALL,
				    M_WAIT);
				rp->rc_flag |= RC_REPMBUF;
			}
		}
		rp->rc_flag &= ~RC_LOCKED;
		if (rp->rc_flag & RC_WANTED) {
			rp->rc_flag &= ~RC_WANTED;
			wakeup(rp);
		}
		return;
	}
}

/* Clean out the cache. Called when the last nfsd terminates. */
void
nfsrv_cleancache(void)
{
	struct nfsrvcache *rp, *nextrp;

	for (rp = TAILQ_FIRST(&nfsrvlruhead); rp != NULL; rp = nextrp) {
		nextrp = TAILQ_NEXT(rp, rc_lru);
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		nfsrv_cleanentry(rp);
		free(rp, M_NFSD, sizeof(*rp));
	}
	numnfsrvcache = 0;
}

struct nfsrvcache *
nfsrv_lookupcache(struct nfsrv_descript *nd)
{
	struct nfsrvhash *hash;
	struct nfsrvcache *rp;

	hash = NFSRCHASH(nd->nd_retxid);
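	/*
	 * Walk the hash chain for a matching xid, procedure and client
	 * address.  If a matching entry is locked by another nfsd, mark
	 * it wanted, sleep and restart the search, since the chain may
	 * have changed while we slept.
	 */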
loop:
	LIST_FOREACH(rp, hash, rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_flag & RC_LOCKED)) {
				rp->rc_flag |= RC_WANTED;
				tsleep_nsec(rp, PZERO - 1, "nfsrc", INFSLP);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			return (rp);
		}
	}

	return (NULL);
}