/*	$OpenBSD: nfs_kq.c,v 1.33 2020/12/25 12:59:53 visa Exp $ */
/*	$NetBSD: nfs_kq.c,v 1.7 2003/10/30 01:43:10 simonb Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <sys/queue.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

void	nfs_kqpoll(void *);
int	nfs_kqwatch(struct vnode *);
void	nfs_kqunwatch(struct vnode *);

void	filt_nfsdetach(struct knote *);
int	filt_nfsread(struct knote *, long);
int	filt_nfswrite(struct knote *, long);
int	filt_nfsvnode(struct knote *, long);

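/*
 * One kevq entry exists per watched vnode.  The list itself, the usecount
 * and the flags are protected by nfskevq_lock; KEVQ_BUSY marks an entry
 * that the poller is currently inspecting with the lock dropped.
 */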
struct kevq {
	SLIST_ENTRY(kevq)	kev_link;
	struct vnode		*vp;
	u_int			usecount;
	u_int			flags;
#define KEVQ_BUSY	0x01	/* currently being processed */
#define KEVQ_WANT	0x02	/* want to change this entry */
	struct timespec		omtime;	/* old modification time */
	struct timespec		octime;	/* old change time */
	nlink_t			onlink;	/* old number of references to file */
};
SLIST_HEAD(kevqlist, kevq);

struct rwlock nfskevq_lock = RWLOCK_INITIALIZER("nfskqlk");
struct proc *pnfskq;
struct kevqlist kevlist = SLIST_HEAD_INITIALIZER(kevlist);

78 /*
79  * This quite simplistic routine periodically checks for server changes
80  * of any of the watched files every NFS_MINATTRTIMO/2 seconds.
81  * Only changes in size, modification time, change time and nlinks
82  * are being checked, everything else is ignored.
83  * The routine only calls VOP_GETATTR() when it's likely it would get
84  * some new data, i.e. when the vnode expires from attrcache. This
85  * should give same result as periodically running stat(2) from userland,
86  * while keeping CPU/network usage low, and still provide proper kevent
87  * semantics.
88  * The poller thread is created when first vnode is added to watch list,
89  * and exits when the watch list is empty. The overhead of thread creation
90  * isn't really important, neither speed of attach and detach of knote.
91  */
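/*
 * For orientation, the watch served by this poller is typically set up from
 * userland through the standard kevent(2) interface, roughly as in the
 * sketch below.  This is illustrative only; the path, the flag selection
 * and the omitted error handling are arbitrary examples, not part of this
 * file:
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *	int fd = open("/mnt/nfs/file", O_RDONLY);
 *
 *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_WRITE | NOTE_ATTRIB | NOTE_LINK | NOTE_DELETE, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(registers the knote)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(returns once nfs_kqpoll()
 *						 posts a matching note)
 */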
/* ARGSUSED */
void
nfs_kqpoll(void *arg)
{
	struct kevq *ke;
	struct vattr attr;
	struct proc *p = pnfskq;
	u_quad_t osize;
	int error;

	for (;;) {
		rw_enter_write(&nfskevq_lock);
		SLIST_FOREACH(ke, &kevlist, kev_link) {
			struct nfsnode *np = VTONFS(ke->vp);

#ifdef DEBUG
			printf("nfs_kqpoll on: ");
			VOP_PRINT(ke->vp);
#endif
			/* skip if still in attrcache */
			if (nfs_getattrcache(ke->vp, &attr) != ENOENT)
				continue;

			/*
			 * Mark entry busy, release lock and check
			 * for changes.
			 */
			ke->flags |= KEVQ_BUSY;
			rw_exit_write(&nfskevq_lock);

			/* save n_size; nfs_getattr() updates it */
			osize = np->n_size;

			error = VOP_GETATTR(ke->vp, &attr, p->p_ucred, p);
			if (error == ESTALE) {
				NFS_INVALIDATE_ATTRCACHE(np);
				VN_KNOTE(ke->vp, NOTE_DELETE);
				goto next;
			}

			/*
			 * The following is somewhat fragile, but it is
			 * about the best we can do.
			 */
			if (attr.va_size != osize) {
				int flags = NOTE_WRITE;

				if (attr.va_size > osize)
					flags |= NOTE_EXTEND;
				else
					flags |= NOTE_TRUNCATE;

				VN_KNOTE(ke->vp, flags);
				ke->omtime = attr.va_mtime;
			} else if (attr.va_mtime.tv_sec != ke->omtime.tv_sec
			    || attr.va_mtime.tv_nsec != ke->omtime.tv_nsec) {
				VN_KNOTE(ke->vp, NOTE_WRITE);
				ke->omtime = attr.va_mtime;
			}

			if (attr.va_ctime.tv_sec != ke->octime.tv_sec
			    || attr.va_ctime.tv_nsec != ke->octime.tv_nsec) {
				VN_KNOTE(ke->vp, NOTE_ATTRIB);
				ke->octime = attr.va_ctime;
			}

			if (attr.va_nlink != ke->onlink) {
				VN_KNOTE(ke->vp, NOTE_LINK);
				ke->onlink = attr.va_nlink;
			}

next:
			rw_enter_write(&nfskevq_lock);
			ke->flags &= ~KEVQ_BUSY;
			if (ke->flags & KEVQ_WANT) {
				ke->flags &= ~KEVQ_WANT;
				wakeup(ke);
			}
		}

		if (SLIST_EMPTY(&kevlist)) {
			/* Nothing more to watch, exit */
			pnfskq = NULL;
			rw_exit_write(&nfskevq_lock);
			kthread_exit(0);
		}
		rw_exit_write(&nfskevq_lock);

		/* wait a while before checking for changes again */
		tsleep_nsec(pnfskq, PSOCK, "nfskqpw",
		    SEC_TO_NSEC(NFS_MINATTRTIMO) / 2);
	}
}

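/*
 * Detach a knote from the vnode's klist and, unless the knote was installed
 * internally for poll(2)/select(2) (__EV_POLL), drop the corresponding
 * reference on the poller's watch-list entry.
 */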
void
filt_nfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	klist_remove_locked(&vp->v_selectinfo.si_note, kn);

	/* Remove the vnode from the watch list */
	if ((kn->kn_flags & __EV_POLL) == 0)
		nfs_kqunwatch(vp);
}

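/*
 * Drop one reference on the watch-list entry for vp.  If the poller is
 * currently inspecting the entry (KEVQ_BUSY), wait until it has finished
 * before touching it.  The entry is removed and freed once the last watcher
 * is gone.
 */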
void
nfs_kqunwatch(struct vnode *vp)
{
	struct kevq *ke;

	rw_enter_write(&nfskevq_lock);
	SLIST_FOREACH(ke, &kevlist, kev_link) {
		if (ke->vp == vp) {
			while (ke->flags & KEVQ_BUSY) {
				ke->flags |= KEVQ_WANT;
				rw_exit_write(&nfskevq_lock);
				tsleep_nsec(ke, PSOCK, "nfskqdet", INFSLP);
				rw_enter_write(&nfskevq_lock);
			}

			if (ke->usecount > 1) {
				/* keep, other kevents need this */
				ke->usecount--;
			} else {
				/* last user, garbage collect the entry */
				SLIST_REMOVE(&kevlist, ke, kevq, kev_link);
				free(ke, M_KEVENT, sizeof(*ke));
			}
			break;
		}
	}
	rw_exit_write(&nfskevq_lock);
}

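/*
 * EVFILT_READ: report how much data is readable beyond the current file
 * offset, based on the file size currently cached in the nfsnode.
 */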
int
filt_nfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct nfsnode *np = VTONFS(vp);

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	kn->kn_data = np->n_size - foffset(kn->kn_fp);
#ifdef DEBUG
	printf("nfsread event. %lld\n", kn->kn_data);
#endif
	if (kn->kn_data == 0 && (kn->kn_sfflags & NOTE_EOF)) {
		kn->kn_fflags |= NOTE_EOF;
		return (1);
	}

	if (kn->kn_flags & __EV_POLL)
		return (1);

	return (kn->kn_data != 0);
}

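/*
 * EVFILT_WRITE: the filter unconditionally reports the vnode as writable
 * (kn_data is left at 0), since NFS imposes no flow control at this layer.
 */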
int
filt_nfswrite(struct knote *kn, long hint)
{
	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	kn->kn_data = 0;
	return (1);
}

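/*
 * EVFILT_VNODE: accumulate the notes posted via VN_KNOTE() (by the poller
 * or by local operations) that the caller asked for, and report the knote
 * as active once any of them has fired.
 */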
int
filt_nfsvnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

static const struct filterops nfsread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_nfsdetach,
	.f_event	= filt_nfsread,
};

static const struct filterops nfswrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_nfsdetach,
	.f_event	= filt_nfswrite,
};

static const struct filterops nfsvnode_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_nfsdetach,
	.f_event	= filt_nfsvnode,
};

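/*
 * VOP_KQFILTER() entry point: select the filterops matching the requested
 * filter, register the vnode with the poller (unless the knote only backs
 * poll(2)/select(2)), and hook the knote into the vnode's klist.
 */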
int
nfs_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;
	struct vnode *vp;
	struct knote *kn;

	vp = ap->a_vp;
	kn = ap->a_kn;

#ifdef DEBUG
	printf("nfs_kqfilter(%d) on: ", kn->kn_filter);
	VOP_PRINT(vp);
#endif

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &nfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &nfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &nfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = vp;

	/*
	 * Put the vnode on the watch list.
	 */
	if ((kn->kn_flags & __EV_POLL) == 0) {
		int error;

		error = nfs_kqwatch(vp);
		if (error)
			return (error);
	}

	klist_insert_locked(&vp->v_selectinfo.si_note, kn);

	return (0);
}

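/*
 * Add vp to the poller's watch list, creating the poller thread if it is
 * not running yet.  The current attributes are recorded so the first poll
 * pass has a baseline to compare against.
 */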
int
nfs_kqwatch(struct vnode *vp)
{
	struct proc *p = curproc;	/* XXX */
	struct vattr attr;
	struct kevq *ke;
	int error = 0;

	/*
	 * Fetch the current attributes.  They are only needed when the
	 * vnode is not being watched yet, but this has to be done without
	 * the lock held.  The attribute cache makes it cheap, so do it now.
	 */
	memset(&attr, 0, sizeof(attr));
	(void) VOP_GETATTR(vp, &attr, p->p_ucred, p);

	rw_enter_write(&nfskevq_lock);

	/* ensure the poller is running */
	if (!pnfskq) {
		error = kthread_create(nfs_kqpoll, NULL, &pnfskq,
				"nfskqpoll");
		if (error)
			goto out;
	}

	SLIST_FOREACH(ke, &kevlist, kev_link)
		if (ke->vp == vp)
			break;

	if (ke) {
		/* already watched, so just bump usecount */
		ke->usecount++;
	} else {
		/* need a new one */
		ke = malloc(sizeof(*ke), M_KEVENT, M_WAITOK);
		ke->vp = vp;
		ke->usecount = 1;
		ke->flags = 0;
		ke->omtime = attr.va_mtime;
		ke->octime = attr.va_ctime;
		ke->onlink = attr.va_nlink;
		SLIST_INSERT_HEAD(&kevlist, ke, kev_link);
	}

	/* kick the poller */
	wakeup(pnfskq);

out:
	rw_exit_write(&nfskevq_lock);
	return (error);
}