xref: /dragonfly/sys/vfs/nfs/nfs_srvcache.c (revision 62f7f702)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
37  * $FreeBSD: src/sys/nfs/nfs_srvcache.c,v 1.21 2000/02/13 03:32:06 peter Exp $
38  * $DragonFly: src/sys/vfs/nfs/nfs_srvcache.c,v 1.13 2008/01/05 14:02:41 swildner Exp $
39  */
40 
41 /*
42  * Reference: Chet Juszczak, "Improving the Performance and Correctness
43  *		of an NFS Server", in Proc. Winter 1989 USENIX Conference,
44  *		pages 53-63. San Diego, February 1989.
45  */
46 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/systm.h>
50 #include <sys/mbuf.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>	/* for dup_sockaddr */
53 
54 #include <netinet/in.h>
55 #include "rpcv2.h"
56 #include "nfsproto.h"
57 #include "nfs.h"
58 #include "nfsrvcache.h"
59 
60 #ifndef NFS_NOSERVER
61 extern struct nfsstats nfsstats;
62 extern int nfsv2_procid[NFS_NPROCS];
63 static long numnfsrvcache;
64 static long desirednfsrvcache = NFSRVCACHESIZ;
65 
66 #define	NFSRCHASH(xid) \
67 	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
68 static LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
69 static TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
70 static u_long nfsrvhash;
71 
72 #define TRUE	1
73 #define	FALSE	0
74 
75 #define	NETFAMILY(rp) \
76 		(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)
77 
/*
 * Static array that defines which nfs rpc's are nonidempotent
 *
 * Indexed by nd_procnum (the NFS procedure number, see nfsproto.h).
 * Only replies to procedures marked TRUE here are cached by
 * nfsrv_updatecache() so that a retransmitted request can be answered
 * without re-executing the operation.  The TRUE entries appear to
 * cover the file-modifying procedures (setattr/write/create/rename
 * etc.) -- NOTE(review): confirm the index-to-procedure mapping
 * against nfsproto.h.
 */
static int nonidempotent[NFS_NPROCS] = {
	FALSE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
};
109 
/*
 * True iff the rpc reply is an nfs status ONLY!
 *
 * Indexed by the NFSv2 procedure number (nfsrv_updatecache() looks it
 * up through nfsv2_procid[], so this table only matters for v2
 * requests).  For procedures marked TRUE the cached reply is just the
 * status word (RC_REPSTATUS) rather than a copy of the reply mbufs.
 * Only 18 entries are listed; the remaining NFS_NPROCS slots are
 * zero-filled by static initialization, i.e. FALSE.
 */
static int nfsv2_repstat[NFS_NPROCS] = {
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
};
131 
132 /*
133  * Initialize the server request cache list
134  */
135 void
136 nfsrv_initcache(void)
137 {
138 
139 	nfsrvhashtbl = hashinit(desirednfsrvcache, M_NFSD, &nfsrvhash);
140 	TAILQ_INIT(&nfsrvlruhead);
141 }
142 
143 /*
144  * Look for the request in the cache
145  * If found then
146  *    return action and optionally reply
147  * else
148  *    insert it in the cache
149  *
150  * The rules are as follows:
151  * - if in progress, return DROP request
152  * - if completed within DELAY of the current time, return DROP it
153  * - if completed a longer time ago return REPLY if the reply was cached or
154  *   return DOIT
155  * Update/add new request at end of lru list
156  */
157 int
158 nfsrv_getcache(struct nfsrv_descript *nd, struct nfssvc_sock *slp,
159 	       struct mbuf **repp)
160 {
161 	struct nfsrvcache *rp;
162 	struct mbuf *mb;
163 	struct sockaddr_in *saddr;
164 	caddr_t bpos;
165 	int ret;
166 
167 	/*
168 	 * Don't cache recent requests for reliable transport protocols.
169 	 * (Maybe we should for the case of a reconnect, but..)
170 	 */
171 	if (!nd->nd_nam2)
172 		return (RC_DOIT);
173 loop:
174 	for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
175 	    rp = rp->rc_hash.le_next) {
176 	    if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
177 		netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
178 		        NFS_DPF(RC, ("H%03x", rp->rc_xid & 0xfff));
179 			if ((rp->rc_flag & RC_LOCKED) != 0) {
180 				rp->rc_flag |= RC_WANTED;
181 				(void) tsleep((caddr_t)rp, 0, "nfsrc", 0);
182 				goto loop;
183 			}
184 			rp->rc_flag |= RC_LOCKED;
185 			/* If not at end of LRU chain, move it there */
186 			if (TAILQ_NEXT(rp, rc_lru) != NULL) {
187 				TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
188 				TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
189 			}
190 			if (rp->rc_state == RC_UNUSED)
191 				panic("nfsrv cache");
192 			if (rp->rc_state == RC_INPROG) {
193 				nfsstats.srvcache_inproghits++;
194 				ret = RC_DROPIT;
195 			} else if (rp->rc_flag & RC_REPSTATUS) {
196 				nfsstats.srvcache_nonidemdonehits++;
197 				nfs_rephead(0, nd, slp, rp->rc_status,
198 					    repp, &mb, &bpos);
199 				ret = RC_REPLY;
200 			} else if (rp->rc_flag & RC_REPMBUF) {
201 				nfsstats.srvcache_nonidemdonehits++;
202 				*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
203 						MB_WAIT);
204 				ret = RC_REPLY;
205 			} else {
206 				nfsstats.srvcache_idemdonehits++;
207 				rp->rc_state = RC_INPROG;
208 				ret = RC_DOIT;
209 			}
210 			rp->rc_flag &= ~RC_LOCKED;
211 			if (rp->rc_flag & RC_WANTED) {
212 				rp->rc_flag &= ~RC_WANTED;
213 				wakeup((caddr_t)rp);
214 			}
215 			return (ret);
216 		}
217 	}
218 	nfsstats.srvcache_misses++;
219 	NFS_DPF(RC, ("M%03x", nd->nd_retxid & 0xfff));
220 	if (numnfsrvcache < desirednfsrvcache) {
221 		rp = (struct nfsrvcache *)kmalloc((u_long)sizeof *rp,
222 		    M_NFSD, M_WAITOK | M_ZERO);
223 		numnfsrvcache++;
224 		rp->rc_flag = RC_LOCKED;
225 	} else {
226 		rp = TAILQ_FIRST(&nfsrvlruhead);
227 		while ((rp->rc_flag & RC_LOCKED) != 0) {
228 			rp->rc_flag |= RC_WANTED;
229 			(void) tsleep((caddr_t)rp, 0, "nfsrc", 0);
230 			rp = TAILQ_FIRST(&nfsrvlruhead);
231 		}
232 		rp->rc_flag |= RC_LOCKED;
233 		LIST_REMOVE(rp, rc_hash);
234 		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
235 		if (rp->rc_flag & RC_REPMBUF)
236 			m_freem(rp->rc_reply);
237 		if (rp->rc_flag & RC_NAM)
238 			FREE(rp->rc_nam, M_SONAME);
239 		rp->rc_flag &= (RC_LOCKED | RC_WANTED);
240 	}
241 	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
242 	rp->rc_state = RC_INPROG;
243 	rp->rc_xid = nd->nd_retxid;
244 	saddr = (struct sockaddr_in *)nd->nd_nam;
245 	switch (saddr->sin_family) {
246 	case AF_INET:
247 		rp->rc_flag |= RC_INETADDR;
248 		rp->rc_inetaddr = saddr->sin_addr.s_addr;
249 		break;
250 	case AF_ISO:
251 	default:
252 		rp->rc_flag |= RC_NAM;
253 		rp->rc_nam = dup_sockaddr(nd->nd_nam);
254 		break;
255 	};
256 	rp->rc_proc = nd->nd_procnum;
257 	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
258 	rp->rc_flag &= ~RC_LOCKED;
259 	if (rp->rc_flag & RC_WANTED) {
260 		rp->rc_flag &= ~RC_WANTED;
261 		wakeup((caddr_t)rp);
262 	}
263 	return (RC_DOIT);
264 }
265 
/*
 * Update a request cache entry after the rpc has been done
 *
 * Finds the entry installed by nfsrv_getcache(), marks it RC_DONE and,
 * for non-idempotent rpc's with a valid reply, caches either just the
 * reply status (v2 status-only procedures) or a private copy of the
 * whole reply mbuf chain, so retransmissions can be answered without
 * re-running the operation.  Silently returns if no entry matches.
 */
void
nfsrv_updatecache(struct nfsrv_descript *nd, int repvalid, struct mbuf *repmbuf)
{
	struct nfsrvcache *rp;

	/* Reliable transports are never cached; see nfsrv_getcache(). */
	if (!nd->nd_nam2)
		return;
loop:
	for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
	    rp = rp->rc_hash.le_next) {
	    if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
		netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			NFS_DPF(RC, ("U%03x", rp->rc_xid & 0xfff));
			/*
			 * Entry busy: wait for the owner and rescan the
			 * chain from scratch, since the entry may have
			 * been recycled while we slept.
			 */
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				(void) tsleep((caddr_t)rp, 0, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			if (rp->rc_state == RC_DONE) {
				/*
				 * This can occur if the cache is too small.
				 * Retransmits of the same request aren't
				 * dropped so we may see the operation
				 * complete more than once; free the older
				 * cached reply before replacing it.
				 */
				if (rp->rc_flag & RC_REPMBUF) {
					m_freem(rp->rc_reply);
					rp->rc_flag &= ~RC_REPMBUF;
				}
			}
			rp->rc_state = RC_DONE;
			/*
			 * If we have a valid reply update status and save
			 * the reply for non-idempotent rpc's.
			 */
			if (repvalid && nonidempotent[nd->nd_procnum]) {
				if ((nd->nd_flag & ND_NFSV3) == 0 &&
				  nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
					/* v2 status-only reply. */
					rp->rc_status = nd->nd_repstat;
					rp->rc_flag |= RC_REPSTATUS;
				} else {
					/* Keep our own copy of the reply. */
					rp->rc_reply = m_copym(repmbuf,
						0, M_COPYALL, MB_WAIT);
					rp->rc_flag |= RC_REPMBUF;
				}
			}
			/* Unlock and wake any thread waiting on this entry. */
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup((caddr_t)rp);
			}
			return;
		}
	}
	NFS_DPF(RC, ("L%03x", nd->nd_retxid & 0xfff));
}
326 
327 /*
328  * Clean out the cache. Called when the last nfsd terminates.
329  */
330 void
331 nfsrv_cleancache(void)
332 {
333 	struct nfsrvcache *rp, *nextrp;
334 
335 	TAILQ_FOREACH_MUTABLE(rp, &nfsrvlruhead, rc_lru, nextrp) {
336 		LIST_REMOVE(rp, rc_hash);
337 		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
338 		if (rp->rc_flag & RC_REPMBUF)
339 			m_freem(rp->rc_reply);
340 		if (rp->rc_flag & RC_NAM)
341 			kfree(rp->rc_nam, M_SONAME);
342 		kfree(rp, M_NFSD);
343 	}
344 	numnfsrvcache = 0;
345 }
346 
347 #endif /* NFS_NOSERVER */
348