/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_srvcache.c,v 1.21 2000/02/13 03:32:06 peter Exp $
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *		of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *		pages 53-63. San Diego, February 1989.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>	/* for dup_sockaddr */

#include <netinet/in.h>
#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsrvcache.h"

#ifndef NFS_NOSERVER
static long numnfsrvcache;
static long desirednfsrvcache = NFSRVCACHESIZ;

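/*
 * Hash on the request xid, folding the high byte into the low bits
 * before masking with nfsrvhash (the hash mask returned by hashinit()).
 */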
#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
static LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
static TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
static u_long nfsrvhash;

#define TRUE	1
#define	FALSE	0

#define	NETFAMILY(rp) \
		(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)

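/*
 * A single LWKT token serializes access to the cache hash table and the
 * LRU list; individual entries are additionally interlocked with the
 * RC_LOCKED/RC_WANTED flags across blocking operations.
 */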
struct lwkt_token srvcache_token = LWKT_TOKEN_INITIALIZER(srvcache_token);

/*
 * Static array that defines which NFS RPCs are nonidempotent,
 * indexed by (NFSv3-style) procedure number.
 */
static int nonidempotent[NFS_NPROCS] = {
	FALSE,	/* NULL */
	FALSE,	/* GETATTR */
	TRUE,	/* SETATTR */
	FALSE,	/* LOOKUP */
	FALSE,	/* ACCESS */
	FALSE,	/* READLINK */
	FALSE,	/* READ */
	TRUE,	/* WRITE */
	TRUE,	/* CREATE */
	TRUE,	/* MKDIR */
	TRUE,	/* SYMLINK */
	TRUE,	/* MKNOD */
	TRUE,	/* REMOVE */
	TRUE,	/* RMDIR */
	TRUE,	/* RENAME */
	TRUE,	/* LINK */
	FALSE,	/* READDIR */
	FALSE,	/* READDIRPLUS */
	FALSE,	/* FSSTAT */
	FALSE,	/* FSINFO */
	FALSE,	/* PATHCONF */
	FALSE,	/* COMMIT */
	FALSE,
	FALSE,
	FALSE,
	FALSE,
};

/* True iff the RPC reply is just an NFS status (NFSv2 procedure numbers) */
static int nfsv2_repstat[NFS_NPROCS] = {
	FALSE,	/* NULL */
	FALSE,	/* GETATTR */
	FALSE,	/* SETATTR */
	FALSE,	/* ROOT */
	FALSE,	/* LOOKUP */
	FALSE,	/* READLINK */
	FALSE,	/* READ */
	FALSE,	/* WRITECACHE */
	FALSE,	/* WRITE */
	FALSE,	/* CREATE */
	TRUE,	/* REMOVE */
	TRUE,	/* RENAME */
	TRUE,	/* LINK */
	TRUE,	/* SYMLINK */
	FALSE,	/* MKDIR */
	TRUE,	/* RMDIR */
	FALSE,	/* READDIR */
	FALSE,	/* STATFS */
};

/*
 * Initialize the server request cache list.
 */
void
nfsrv_initcache(void)
{
	nfsrvhashtbl = hashinit(desirednfsrvcache, M_NFSD, &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
}

/*
 * Look for the request in the cache.
 * If found then
 *    return the appropriate action (and a cached reply, if any),
 * else
 *    insert it in the cache.
 *
 * The rules are as follows:
 * - if the request is still in progress, return RC_DROPIT
 * - if it has completed and the reply (or just its status) was cached,
 *   return RC_REPLY
 * - otherwise return RC_DOIT so the caller performs the operation again
 * The entry is moved to (or added at) the end of the LRU list.
 */
int
nfsrv_getcache(struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	       struct mbuf **repp)
{
	struct nfsrvcache *rp;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	caddr_t bpos;
	int ret;

	/*
	 * Don't cache recent requests for reliable transport protocols.
	 * (nd_nam2 is non-NULL only for datagram sockets.)
	 * (Maybe we should for the case of a reconnect, but..)
	 */
	if (nd->nd_nam2 == NULL)
		return (RC_DOIT);

	lwkt_gettoken(&srvcache_token);
loop:
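	/*
	 * Scan the hash chain for an entry with the same xid, procedure
	 * number, and client address as the incoming request.
	 */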
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
	    if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
		netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			NFS_DPF(RC, ("H%03x", rp->rc_xid & 0xfff));
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				tsleep((caddr_t)rp, 0, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			/* If not at end of LRU chain, move it there */
			if (TAILQ_NEXT(rp, rc_lru) != NULL) {
				TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
				TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			}
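			/*
			 * Decide what to do based on the entry's state:
			 * drop a duplicate of a request that is still in
			 * progress, replay a cached status or reply, or
			 * tell the caller to redo an idempotent operation.
			 */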
			if (rp->rc_state == RC_UNUSED)
				panic("nfsrv cache");
			if (rp->rc_state == RC_INPROG) {
				nfsstats.srvcache_inproghits++;
				ret = RC_DROPIT;
			} else if (rp->rc_flag & RC_REPSTATUS) {
				nfsstats.srvcache_nonidemdonehits++;
				nfs_rephead(0, nd, slp, rp->rc_status,
					    repp, &mb, &bpos);
				ret = RC_REPLY;
			} else if (rp->rc_flag & RC_REPMBUF) {
				nfsstats.srvcache_nonidemdonehits++;
				*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
						MB_WAIT);
				ret = RC_REPLY;
			} else {
				nfsstats.srvcache_idemdonehits++;
				rp->rc_state = RC_INPROG;
				ret = RC_DOIT;
			}
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup((caddr_t)rp);
			}
			lwkt_reltoken(&srvcache_token);
			return (ret);
		}
	}

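	/*
	 * Cache miss.  Allocate a new entry while we are under the limit,
	 * otherwise recycle the least recently used entry, waiting for it
	 * if it is currently locked.
	 */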
	nfsstats.srvcache_misses++;
	NFS_DPF(RC, ("M%03x", nd->nd_retxid & 0xfff));
	if (numnfsrvcache < desirednfsrvcache) {
		rp = kmalloc((u_long)sizeof *rp, M_NFSD, M_WAITOK | M_ZERO);
		numnfsrvcache++;
		rp->rc_flag = RC_LOCKED;
	} else {
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			tsleep((caddr_t)rp, 0, "nfsrc", 0);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		if (rp->rc_flag & RC_REPMBUF) {
			m_freem(rp->rc_reply);
			rp->rc_reply = NULL;
			rp->rc_flag &= ~RC_REPMBUF;
		}
		if (rp->rc_flag & RC_NAM) {
			kfree(rp->rc_nam, M_SONAME);
			rp->rc_nam = NULL;
			rp->rc_flag &= ~RC_NAM;
		}
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);

	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
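	/*
	 * Remember the client's address.  IPv4 addresses are stored
	 * directly in the entry; any other address family keeps a
	 * malloc'd copy of the sockaddr.
	 */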
	saddr = (struct sockaddr_in *)nd->nd_nam;
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	case AF_ISO:
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = dup_sockaddr(nd->nd_nam);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	rp->rc_flag &= ~RC_LOCKED;
	if (rp->rc_flag & RC_WANTED) {
		rp->rc_flag &= ~RC_WANTED;
		wakeup((caddr_t)rp);
	}
	lwkt_reltoken(&srvcache_token);

	return (RC_DOIT);
}

/*
 * Update a request cache entry after the RPC has been done.
 */
void
nfsrv_updatecache(struct nfsrv_descript *nd, int repvalid, struct mbuf *repmbuf)
{
	struct nfsrvcache *rp;

	if (nd->nd_nam2 == NULL)
		return;

	lwkt_gettoken(&srvcache_token);
loop:
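	/*
	 * Find the cache entry that nfsrv_getcache() created for this
	 * request so its result can be recorded.
	 */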
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
	    if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
		netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			NFS_DPF(RC, ("U%03x", rp->rc_xid & 0xfff));
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				tsleep((caddr_t)rp, 0, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			if (rp->rc_state == RC_DONE) {
				/*
				 * This can occur if the cache is too small.
				 * Retransmits of the same request aren't
				 * dropped so we may see the operation
				 * complete more than once.
				 */
				if (rp->rc_flag & RC_REPMBUF) {
					m_freem(rp->rc_reply);
					rp->rc_reply = NULL;
					rp->rc_flag &= ~RC_REPMBUF;
				}
			}
			rp->rc_state = RC_DONE;

			/*
			 * If we have a valid reply, update the status and
			 * save the reply for non-idempotent RPCs.
			 */
			if (repvalid && nonidempotent[nd->nd_procnum]) {
				if ((nd->nd_flag & ND_NFSV3) == 0 &&
				  nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
					rp->rc_status = nd->nd_repstat;
					rp->rc_flag |= RC_REPSTATUS;
				} else {
					if (rp->rc_flag & RC_REPMBUF) {
						m_freem(rp->rc_reply);
						rp->rc_reply = NULL;
						rp->rc_flag &= ~RC_REPMBUF;
					}
					rp->rc_reply = m_copym(repmbuf, 0,
							M_COPYALL, MB_WAIT);
					rp->rc_flag |= RC_REPMBUF;
				}
			}
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup((caddr_t)rp);
			}
			break;
		}
	}
	lwkt_reltoken(&srvcache_token);
	NFS_DPF(RC, ("L%03x", nd->nd_retxid & 0xfff));
}

/*
 * Clean out the cache.  Called when the last nfsd terminates.
 */
void
nfsrv_cleancache(void)
{
	struct nfsrvcache *rp;

	lwkt_gettoken(&srvcache_token);
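	/*
	 * Tear down every entry on the LRU list, waiting for any entry
	 * that is still locked by an nfsd before freeing it.
	 */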
	while ((rp = TAILQ_FIRST(&nfsrvlruhead)) != NULL) {
		if (rp->rc_flag & RC_LOCKED) {
			rp->rc_flag |= RC_WANTED;
			tsleep((caddr_t)rp, 0, "nfsrc", 0);
			continue;
		}
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		if (rp->rc_flag & RC_REPMBUF) {
			m_freem(rp->rc_reply);
			rp->rc_reply = NULL;
			rp->rc_flag &= ~RC_REPMBUF;
		}
		if (rp->rc_flag & RC_NAM) {
			kfree(rp->rc_nam, M_SONAME);
			rp->rc_nam = NULL;
			rp->rc_flag &= ~RC_NAM;
		}
		kfree(rp, M_NFSD);
	}
	numnfsrvcache = 0;
	lwkt_reltoken(&srvcache_token);
}

#endif /* NFS_NOSERVER */