/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_srvcache.c,v 1.21 2000/02/13 03:32:06 peter Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_srvcache.c,v 1.12 2006/09/05 03:48:13 dillon Exp $
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *		of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *		pages 53-63. San Diego, February 1989.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>	/* for dup_sockaddr */

#include <netinet/in.h>
#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsrvcache.h"

#ifndef NFS_NOSERVER
extern struct nfsstats nfsstats;
extern int nfsv2_procid[NFS_NPROCS];
static long numnfsrvcache;
static long desirednfsrvcache = NFSRVCACHESIZ;

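/*
 * Hash on the low-order bits of the xid, folding the upper byte back in
 * so that xids generated by a simple counter still spread across the
 * table.  nfsrvhash holds the mask returned by hashinit().
 */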
#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
static LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
static TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
static u_long nfsrvhash;

#define TRUE	1
#define	FALSE	0

#define	NETFAMILY(rp) \
		(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)

/*
 * Static array that defines which NFS RPCs are non-idempotent.
 */
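/*
 * The table is indexed by NFSv3 procedure number; the TRUE entries
 * correspond to the state-modifying RPCs (SETATTR, WRITE, CREATE,
 * MKDIR, SYMLINK, MKNOD, REMOVE, RMDIR, RENAME, LINK), whose replies
 * are cached so that a retransmission is not blindly re-executed.
 */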
static int nonidempotent[NFS_NPROCS] = {
	FALSE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
};

/* True iff the rpc reply is an nfs status ONLY! */
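/*
 * Indexed by NFSv2 procedure number: TRUE for REMOVE, RENAME, LINK,
 * SYMLINK and RMDIR, whose v2 replies carry nothing but the status
 * word, so only that word needs to be cached (RC_REPSTATUS).
 */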
static int nfsv2_repstat[NFS_NPROCS] = {
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
};

/*
 * Initialize the server request cache list
 */
void
nfsrv_initcache(void)
{

	nfsrvhashtbl = hashinit(desirednfsrvcache, M_NFSD, &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
}

/*
 * Look for the request in the cache.
 * If found, return the action to take (and optionally the cached reply);
 * otherwise insert the request into the cache.
 *
 * The rules are as follows:
 * - if the request is in progress, drop it (RC_DROPIT)
 * - if it completed within DELAY of the current time, drop it
 * - if it completed longer ago, return the cached reply (RC_REPLY) if one
 *   was saved, otherwise have it re-executed (RC_DOIT)
 * The entry is added to, or moved to, the end of the LRU list.
 */
int
nfsrv_getcache(struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	       struct mbuf **repp)
{
	struct nfsrvcache *rp;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	caddr_t bpos;
	int ret;

	/*
	 * Don't cache recent requests for reliable transport protocols.
	 * (Maybe we should for the case of a reconnect, but..)
	 */
	if (!nd->nd_nam2)
		return (RC_DOIT);
loop:
	for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
	    rp = rp->rc_hash.le_next) {
	    if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
		netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			NFS_DPF(RC, ("H%03x", rp->rc_xid & 0xfff));
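			/*
			 * Entries are serialized with an RC_LOCKED/RC_WANTED
			 * flag lock.  If the entry is busy, sleep until the
			 * holder wakes us and rescan the chain, since it may
			 * have changed while we slept.
			 */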
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				(void) tsleep((caddr_t)rp, 0, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			/* If not at end of LRU chain, move it there */
			if (TAILQ_NEXT(rp, rc_lru) != NULL) {
				TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
				TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			}
			if (rp->rc_state == RC_UNUSED)
				panic("nfsrv cache");
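			/*
			 * Decide how to handle the hit: drop the request if
			 * the original is still in progress, replay the
			 * cached status or reply mbuf if we have one, and
			 * otherwise let the nfsd re-execute the idempotent
			 * operation.
			 */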
			if (rp->rc_state == RC_INPROG) {
				nfsstats.srvcache_inproghits++;
				ret = RC_DROPIT;
			} else if (rp->rc_flag & RC_REPSTATUS) {
				nfsstats.srvcache_nonidemdonehits++;
				nfs_rephead(0, nd, slp, rp->rc_status,
					    repp, &mb, &bpos);
				ret = RC_REPLY;
			} else if (rp->rc_flag & RC_REPMBUF) {
				nfsstats.srvcache_nonidemdonehits++;
				*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
						MB_WAIT);
				ret = RC_REPLY;
			} else {
				nfsstats.srvcache_idemdonehits++;
				rp->rc_state = RC_INPROG;
				ret = RC_DOIT;
			}
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup((caddr_t)rp);
			}
			return (ret);
		}
	}
	nfsstats.srvcache_misses++;
	NFS_DPF(RC, ("M%03x", nd->nd_retxid & 0xfff));
	if (numnfsrvcache < desirednfsrvcache) {
		rp = (struct nfsrvcache *)kmalloc((u_long)sizeof *rp,
		    M_NFSD, M_WAITOK);
		bzero((char *)rp, sizeof *rp);
		numnfsrvcache++;
		rp->rc_flag = RC_LOCKED;
	} else {
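		/*
		 * The cache is full; recycle the least recently used entry,
		 * waiting for it if another nfsd currently has it locked.
		 */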
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			(void) tsleep((caddr_t)rp, 0, "nfsrc", 0);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		if (rp->rc_flag & RC_REPMBUF)
			m_freem(rp->rc_reply);
		if (rp->rc_flag & RC_NAM)
			kfree(rp->rc_nam, M_SONAME);
		rp->rc_flag &= (RC_LOCKED | RC_WANTED);
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = (struct sockaddr_in *)nd->nd_nam;
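	/*
	 * Record the client's address so retransmissions can be matched:
	 * an IPv4 address is stored in-line, anything else gets a
	 * duplicated sockaddr.
	 */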
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	case AF_ISO:
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = dup_sockaddr(nd->nd_nam);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	rp->rc_flag &= ~RC_LOCKED;
	if (rp->rc_flag & RC_WANTED) {
		rp->rc_flag &= ~RC_WANTED;
		wakeup((caddr_t)rp);
	}
	return (RC_DOIT);
}

/*
 * Update a request cache entry after the rpc has been done
 */
void
nfsrv_updatecache(struct nfsrv_descript *nd, int repvalid, struct mbuf *repmbuf)
{
	struct nfsrvcache *rp;

	if (!nd->nd_nam2)
		return;
loop:
	for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
	    rp = rp->rc_hash.le_next) {
	    if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
		netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			NFS_DPF(RC, ("U%03x", rp->rc_xid & 0xfff));
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				(void) tsleep((caddr_t)rp, 0, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			if (rp->rc_state == RC_DONE) {
				/*
				 * This can occur if the cache is too small.
				 * Retransmits of the same request aren't
				 * dropped, so we may see the operation
				 * complete more than once.
				 */
				if (rp->rc_flag & RC_REPMBUF) {
					m_freem(rp->rc_reply);
					rp->rc_flag &= ~RC_REPMBUF;
				}
			}
			rp->rc_state = RC_DONE;
			/*
			 * If we have a valid reply, update the status and
			 * save the reply for non-idempotent RPCs.
			 */
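			/*
			 * For an NFSv2 procedure whose reply is only a status
			 * word, just the status is recorded (RC_REPSTATUS);
			 * otherwise a copy of the reply mbuf chain is kept
			 * (RC_REPMBUF).
			 */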
			if (repvalid && nonidempotent[nd->nd_procnum]) {
				if ((nd->nd_flag & ND_NFSV3) == 0 &&
				  nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
					rp->rc_status = nd->nd_repstat;
					rp->rc_flag |= RC_REPSTATUS;
				} else {
					rp->rc_reply = m_copym(repmbuf,
						0, M_COPYALL, MB_WAIT);
					rp->rc_flag |= RC_REPMBUF;
				}
			}
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup((caddr_t)rp);
			}
			return;
		}
	}
	NFS_DPF(RC, ("L%03x", nd->nd_retxid & 0xfff));
}

/*
 * Clean out the cache. Called when the last nfsd terminates.
 */
void
nfsrv_cleancache(void)
{
	struct nfsrvcache *rp, *nextrp;

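	/*
	 * Walk the LRU list and release every entry; TAILQ_FOREACH_MUTABLE
	 * remembers the next element so removal of the current one is safe.
	 */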
	TAILQ_FOREACH_MUTABLE(rp, &nfsrvlruhead, rc_lru, nextrp) {
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		if (rp->rc_flag & RC_REPMBUF)
			m_freem(rp->rc_reply);
		if (rp->rc_flag & RC_NAM)
			kfree(rp->rc_nam, M_SONAME);
		kfree(rp, M_NFSD);
	}
	numnfsrvcache = 0;
}

#endif /* NFS_NOSERVER */