xref: /original-bsd/sys/kern/vfs_vnops.c (revision 0ac4996f)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_vnops.c	8.11 (Berkeley) 05/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>

#include <vm/vm.h>

struct	fileops vnops =
	{ vn_read, vn_write, vn_ioctl, vn_select, vn_closefile };

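/*
 * A sketch of how these entries are assumed to be reached (the dispatching
 * code lives elsewhere): the generic read(2)/write(2)/ioctl(2) paths call
 * through the f_ops pointer of the open-file entry, e.g.
 *
 *	error = (*fp->f_ops->fo_read)(fp, &auio, fp->f_cred);
 *
 * which for a vnode-backed descriptor resolves to vn_read() below.
 */
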
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
vn_open(ndp, fmode, cmode)
	register struct nameidata *ndp;
	int fmode, cmode;
{
	register struct vnode *vp;
	register struct proc *p = ndp->ni_cnd.cn_proc;
	register struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (error = namei(ndp))
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
			if (error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, vap))
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF;
		if (error = namei(ndp))
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & FREAD) {
			if (error = VOP_ACCESS(vp, VREAD, cred, p))
				goto bad;
		}
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)))
				goto bad;
		}
	}
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0, p);				/* XXX */
		VOP_LEASE(vp, p, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		if (error = VOP_SETATTR(vp, vap, cred, p))
			goto bad;
	}
	if (error = VOP_OPEN(vp, fmode, cred, p))
		goto bad;
	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}
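
/*
 * Illustrative sketch (hypothetical caller and names): a typical in-kernel
 * user of this interface sets up the nameidata with NDINIT(), calls
 * vn_open(), and must remember that on success the vnode comes back both
 * referenced and locked.  The pathname here is assumed to live in user
 * space, as with a system call argument.
 */
#ifdef notdef
int
example_vn_open_usage(path, p)
	char *path;
	struct proc *p;
{
	struct nameidata nd;
	struct vnode *vp;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, path, p);
	if (error = vn_open(&nd, FREAD, 0))
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0, p);		/* vn_open returns vp locked */
	/* ... use vp, e.g. via vn_rdwr() below ... */
	return (vn_close(vp, FREAD, p->p_ucred, p));
}
#endif /* notdef */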

/*
 * Check for write permissions on the specified vnode.
 * The read-only status of the file system is checked.
 * Also, prototype text segments cannot be written.
 */
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * Disallow write attempts on read-only file systems,
	 * unless the file is a socket or a block or character
	 * device resident on the file system.
	 */
	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		switch (vp->v_type) {
		case VREG: case VDIR: case VLNK:
			return (EROFS);
		}
	}
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if ((vp->v_flag & VTEXT) && !vnode_pager_uncache(vp))
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
vn_close(vp, flags, cred, p)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, p);
	vrele(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, p);
	return (error);
}
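
/*
 * Illustrative sketch (hypothetical helper): read the first len bytes of
 * an unlocked, referenced vnode into a kernel buffer with vn_rdwr().
 * Passing a nil aresid turns a short transfer into EIO, as coded above;
 * leaving IO_NODELOCKED clear lets vn_rdwr() do the locking itself.
 */
#ifdef notdef
int
example_vn_rdwr_usage(vp, buf, len, p)
	struct vnode *vp;
	caddr_t buf;
	int len;
	struct proc *p;
{

	return (vn_rdwr(UIO_READ, vp, buf, len, (off_t)0, UIO_SYSSPACE,
	    0, p->p_ucred, (int *)0, p));
}
#endif /* notdef */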

/*
 * File table vnode read routine.
 */
vn_read(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct proc *p = uio->uio_procp;
	int count, error;

	VOP_LEASE(vp, p, cred, LEASE_READ);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0,
		cred);
	fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode write routine.
 */
vn_write(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct proc *p = uio->uio_procp;
	int count, error, ioflag = IO_UNIT;

	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & O_FSYNC) || (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (ioflag & IO_APPEND)
		fp->f_offset = uio->uio_offset;
	else
		fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode stat routine.
 */
vn_stat(vp, sb, p)
	struct vnode *vp;
	register struct stat *sb;
	struct proc *p;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, p->p_ucred, p);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = vap->va_fsid;
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_blksize = vap->va_blocksize;
	sb->st_flags = vap->va_flags;
	sb->st_gen = vap->va_gen;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
vn_ioctl(fp, com, data, p)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct proc *p;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
		return (ENOTTY);

	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode select routine.
 */
vn_select(fp, which, p)
	struct file *fp;
	int which;
	struct proc *p;
{

	return (VOP_SELECT(((struct vnode *)fp->f_data), which, fp->f_flag,
		fp->f_cred, p));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
vn_lock(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp, flags | LK_INTERLOCK, p);
			if (error == 0)
				return (error);
#ifdef DEBUG
			if (error == EWOULDBLOCK)
				panic("vn_lock: hung lock");
#endif
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}
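
/*
 * Usage sketch (assumed caller, hypothetical snippet): without LK_RETRY a
 * caller must be prepared for an ENOENT return when the vnode is in the
 * middle of being reclaimed, e.g.
 *
 *	if (error = vn_lock(vp, LK_EXCLUSIVE, p))
 *		return (error);
 *
 * whereas LK_EXCLUSIVE | LK_RETRY, the combination used throughout the
 * routines above, loops until VOP_LOCK() succeeds.
 */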

/*
 * File table vnode close routine.
 */
vn_closefile(fp, p)
	struct file *fp;
	struct proc *p;
{

	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, p));
}
439