xref: /original-bsd/sys/kern/vfs_vnops.c (revision c4f3b704)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_vnops.c	8.13 (Berkeley) 05/27/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>

#include <vm/vm.h>

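/*
 * File table operations vector for open files that refer to vnodes.
 */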
struct	fileops vnops =
	{ vn_read, vn_write, vn_ioctl, vn_select, vn_closefile };

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
vn_open(ndp, fmode, cmode)
	register struct nameidata *ndp;
	int fmode, cmode;
{
	register struct vnode *vp;
	register struct proc *p = ndp->ni_cnd.cn_proc;
	register struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (error = namei(ndp))
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
			if (error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, vap))
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF;
		if (error = namei(ndp))
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & FREAD) {
			if (error = VOP_ACCESS(vp, VREAD, cred, p))
				goto bad;
		}
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)))
				goto bad;
		}
	}
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0, p);				/* XXX */
		VOP_LEASE(vp, p, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		if (error = VOP_SETATTR(vp, vap, cred, p))
			goto bad;
	}
	if (error = VOP_OPEN(vp, fmode, cred, p))
		goto bad;
	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}

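/*
 * Example (an illustrative sketch, not part of this file): a typical
 * in-kernel caller sets up a nameidata with NDINIT() and then uses
 * vn_open().  The path and open mode here are invented for the example.
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path", p);
 *	if (error = vn_open(&nd, FREAD, 0))
 *		return (error);
 *	VOP_UNLOCK(nd.ni_vp, 0, p);
 *	...
 *	error = vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
 *
 * vn_open() returns with the vnode locked and referenced; a caller that
 * keeps the vnode across blocking operations normally unlocks it first
 * and drops the reference with vn_close() (or vrele()) when done.
 */
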
/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if ((vp->v_flag & VTEXT) && !vnode_pager_uncache(vp))
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
vn_close(vp, flags, cred, p)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, p);
	vrele(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, p);
	return (error);
}

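/*
 * Example (an illustrative sketch, not part of this file): vn_rdwr()
 * saves callers from building a uio by hand for a single transfer.
 * The buffer, size, and offset below are invented for the example.
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
 *
 * Passing a null aresid instead makes a short transfer come back as EIO;
 * IO_NODELOCKED in ioflg skips the vn_lock()/VOP_UNLOCK() pair when the
 * caller already holds the vnode lock.
 */
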
/*
 * File table vnode read routine.
 */
vn_read(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct proc *p = uio->uio_procp;
	int count, error;

	VOP_LEASE(vp, p, cred, LEASE_READ);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0,
		cred);
	fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode write routine.
 */
vn_write(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct proc *p = uio->uio_procp;
	int count, error, ioflag = IO_UNIT;

	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & O_FSYNC) || (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (ioflag & IO_APPEND)
		fp->f_offset = uio->uio_offset;
	else
		fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode stat routine.
 */
vn_stat(vp, sb, p)
	struct vnode *vp;
	register struct stat *sb;
	struct proc *p;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, p->p_ucred, p);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = vap->va_fsid;
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_blksize = vap->va_blocksize;
	sb->st_flags = vap->va_flags;
	sb->st_gen = vap->va_gen;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
vn_ioctl(fp, com, data, p)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct proc *p;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
		return (ENOTTY);

	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode select routine.
 */
vn_select(fp, which, p)
	struct file *fp;
	int which;
	struct proc *p;
{

	return (VOP_SELECT(((struct vnode *)fp->f_data), which, fp->f_flag,
		fp->f_cred, p));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
		if (vp->v_flag & VXLOCK) {
			/*
			 * The vnode is being cleaned out (e.g. forcibly
			 * revoked); wait for that to finish, then report
			 * it as gone.
			 */
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp, flags | LK_INTERLOCK, p);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

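/*
 * Example (an illustrative sketch, not part of this file): callers hold
 * the vnode lock only across the VOP calls that need it, as vn_read()
 * and vn_rdwr() above do:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
 *	error = VOP_READ(vp, &auio, ioflg, cred);
 *	VOP_UNLOCK(vp, 0, p);
 *
 * With LK_RETRY the loop above retries until the lock is obtained;
 * without it, a vnode that is being cleaned out produces ENOENT instead.
 */
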
/*
 * File table vnode close routine.
 */
vn_closefile(fp, p)
	struct file *fp;
	struct proc *p;
{

	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, p));
}