xref: /dragonfly/sys/vfs/hpfs/hpfs_vnops.c (revision 3f5e28f4)
1 /*-
2  * Copyright (c) 1998, 1999 Semen Ustimenko (semenu@FreeBSD.org)
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/fs/hpfs/hpfs_vnops.c,v 1.2.2.2 2002/01/15 18:35:09 semenu Exp $
27  * $DragonFly: src/sys/vfs/hpfs/hpfs_vnops.c,v 1.41 2007/05/09 00:53:35 dillon Exp $
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/time.h>
35 #include <sys/types.h>
36 #include <sys/stat.h>
37 #include <sys/vnode.h>
38 #include <sys/mount.h>
39 #include <sys/namei.h>
40 #include <sys/malloc.h>
41 #include <sys/buf.h>
42 #include <sys/dirent.h>
43 
44 #include <machine/limits.h>
45 
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #if !defined(__DragonFly__)
49 #include <vm/vm_prot.h>
50 #endif
51 #include <vm/vm_page.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_pager.h>
54 #include <vm/vm_zone.h>
55 #if defined(__DragonFly__)
56 #include <vm/vnode_pager.h>
57 #endif
58 #include <vm/vm_extern.h>
59 #include <sys/buf2.h>
60 
61 #if !defined(__DragonFly__)
62 #include <miscfs/specfs/specdev.h>
63 #include <miscfs/genfs/genfs.h>
64 #endif
65 
66 #include <sys/unistd.h> /* for pathconf(2) constants */
67 
68 #include "hpfs.h"
69 #include "hpfsmount.h"
70 #include "hpfs_subr.h"
71 #include "hpfs_ioctl.h"
72 
73 static int	hpfs_de_uiomove (int *, struct hpfsmount *,
74 				 struct hpfsdirent *, struct uio *);
75 static int	hpfs_ioctl (struct vop_ioctl_args *ap);
76 static int	hpfs_read (struct vop_read_args *);
77 static int	hpfs_write (struct vop_write_args *ap);
78 static int	hpfs_getattr (struct vop_getattr_args *ap);
79 static int	hpfs_setattr (struct vop_setattr_args *ap);
80 static int	hpfs_inactive (struct vop_inactive_args *ap);
81 static int	hpfs_print (struct vop_print_args *ap);
82 static int	hpfs_reclaim (struct vop_reclaim_args *ap);
83 static int	hpfs_strategy (struct vop_strategy_args *ap);
84 static int	hpfs_access (struct vop_access_args *ap);
85 static int	hpfs_readdir (struct vop_readdir_args *ap);
86 static int	hpfs_lookup (struct vop_old_lookup_args *ap);
87 static int	hpfs_create (struct vop_old_create_args *);
88 static int	hpfs_remove (struct vop_old_remove_args *);
89 static int	hpfs_bmap (struct vop_bmap_args *ap);
90 #if defined(__DragonFly__)
91 static int	hpfs_getpages (struct vop_getpages_args *ap);
92 static int	hpfs_putpages (struct vop_putpages_args *);
93 static int	hpfs_fsync (struct vop_fsync_args *ap);
94 #endif
95 static int	hpfs_pathconf (struct vop_pathconf_args *ap);
96 
97 #if defined(__DragonFly__)
98 int
99 hpfs_getpages(struct vop_getpages_args *ap)
100 {
101 	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
102 		ap->a_reqpage);
103 }
104 
105 int
106 hpfs_putpages(struct vop_putpages_args *ap)
107 {
108 	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
109 		ap->a_sync, ap->a_rtvals);
110 }
111 
112 /*
113  * hpfs_fsync(struct vnode *a_vp, struct ucred *a_cred, int a_waitfor,
114  *	      struct proc *a_td)
115  */
116 static int
117 hpfs_fsync(struct vop_fsync_args *ap)
118 {
119 	struct vnode *vp = ap->a_vp;
120 
121 	/*
122 	 * Flush all dirty buffers associated with a vnode.
123 	 */
124 #ifdef DIAGNOSTIC
125 loop:
126 #endif
127 	vfsync(vp, ap->a_waitfor, 0, NULL, NULL);
128 #ifdef DIAGNOSTIC
129 	if (ap->a_waitfor == MNT_WAIT && !RB_EMPTY(&vp->v_rbdirty_tree)) {
130 		vprint("hpfs_fsync: dirty", vp);
131 		goto loop;
132 	}
133 #endif
134 
135 	/*
136 	 * Write out the on-disc version of the vnode.
137 	 */
138 	return hpfs_update(VTOHP(vp));
139 }
140 
141 #endif
142 
143 /*
144  * hpfs_ioctl(struct vnode *a_vp, u_long a_command, caddr_t a_data,
145  *	      int a_fflag, struct ucred *a_cred, struct proc *a_td)
146  */
147 static int
148 hpfs_ioctl(struct vop_ioctl_args *ap)
149 {
150 	struct vnode *vp = ap->a_vp;
151 	struct hpfsnode *hp = VTOHP(vp);
152 	int error;
153 
154 	kprintf("hpfs_ioctl(0x%x, 0x%lx, 0x%p, 0x%x): ",
155 		hp->h_no, ap->a_command, ap->a_data, ap->a_fflag);
156 
157 	switch (ap->a_command) {
158 	case HPFSIOCGEANUM: {
159 		u_long eanum;
160 		u_long passed;
161 		struct ea *eap;
162 
163 		eanum = 0;
164 
165 		if (hp->h_fn.fn_ealen > 0) {
166 			eap = (struct ea *)&(hp->h_fn.fn_int);
167 			passed = 0;
168 
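			/*
			 * Walk the packed EA list: each record is a struct ea
			 * header followed by what appears to be the
			 * NUL-terminated name and then the value, so each step
			 * advances by header + namelen + 1 + vallen.
			 */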
169 			while (passed < hp->h_fn.fn_ealen) {
170 
171 				kprintf("EAname: %s\n", EA_NAME(eap));
172 
173 				eanum++;
174 				passed += sizeof(struct ea) +
175 					  eap->ea_namelen + 1 + eap->ea_vallen;
176 				eap = (struct ea *)((caddr_t)hp->h_fn.fn_int +
177 						passed);
178 			}
179 			error = 0;
180 		} else {
181 			error = ENOENT;
182 		}
183 
184 		kprintf("%lu eas\n", eanum);
185 
186 		*(u_long *)ap->a_data = eanum;
187 
188 		break;
189 	}
190 	case HPFSIOCGEASZ: {
191 		u_long eanum;
192 		u_long passed;
193 		struct ea *eap;
194 
195 		kprintf("EA%ld\n", *(u_long *)ap->a_data);
196 
197 		eanum = 0;
198 		if (hp->h_fn.fn_ealen > 0) {
199 			eap = (struct ea *)&(hp->h_fn.fn_int);
200 			passed = 0;
201 
202 			error = ENOENT;
203 			while (passed < hp->h_fn.fn_ealen) {
204 				kprintf("EAname: %s\n", EA_NAME(eap));
205 
206 				if (eanum == *(u_long *)ap->a_data) {
207 					*(u_long *)ap->a_data =
208 					  	eap->ea_namelen + 1 +
209 						eap->ea_vallen;
210 
211 					error = 0;
212 					break;
213 				}
214 
215 				eanum++;
216 				passed += sizeof(struct ea) +
217 					  eap->ea_namelen + 1 + eap->ea_vallen;
218 				eap = (struct ea *)((caddr_t)hp->h_fn.fn_int +
219 						passed);
220 			}
221 		} else {
222 			error = ENOENT;
223 		}
224 
225 		break;
226 	}
227 	case HPFSIOCRDEA: {
228 		u_long eanum;
229 		u_long passed;
230 		struct hpfs_rdea *rdeap;
231 		struct ea *eap;
232 
233 		rdeap = (struct hpfs_rdea *)ap->a_data;
234 		kprintf("EA%ld\n", rdeap->ea_no);
235 
236 		eanum = 0;
237 		if (hp->h_fn.fn_ealen > 0) {
238 			eap = (struct ea *)&(hp->h_fn.fn_int);
239 			passed = 0;
240 
241 			error = ENOENT;
242 			while (passed < hp->h_fn.fn_ealen) {
243 				kprintf("EAname: %s\n", EA_NAME(eap));
244 
245 				if (eanum == rdeap->ea_no) {
246 					rdeap->ea_sz = eap->ea_namelen + 1 +
247 							eap->ea_vallen;
248 					copyout(EA_NAME(eap),rdeap->ea_data,
249 						rdeap->ea_sz);
250 					error = 0;
251 					break;
252 				}
253 
254 				eanum++;
255 				passed += sizeof(struct ea) +
256 					  eap->ea_namelen + 1 + eap->ea_vallen;
257 				eap = (struct ea *)((caddr_t)hp->h_fn.fn_int +
258 						passed);
259 			}
260 		} else {
261 			error = ENOENT;
262 		}
263 
264 		break;
265 	}
266 	default:
267 		error = EOPNOTSUPP;
268 		break;
269 	}
270 	return (error);
271 }
272 
273 /*
274  * Map file offset to disk offset.
275  *
276  * hpfs_bmap(struct vnode *a_vp, off_t a_loffset, struct vnode **a_vpp,
277  *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
278  */
279 int
280 hpfs_bmap(struct vop_bmap_args *ap)
281 {
282 	struct hpfsnode *hp = VTOHP(ap->a_vp);
283 	int error;
284 	daddr_t lbn;
285 	daddr_t dbn;
286 
287 	if (ap->a_vpp != NULL)
288 		*ap->a_vpp = hp->h_devvp;
289 	if (ap->a_runb != NULL)
290 		*ap->a_runb = 0;
291 	if (ap->a_doffsetp == NULL)
292 		return (0);
293 
294 	dprintf(("hpfs_bmap(0x%x, 0x%lx): ", hp->h_no, (u_long)ap->a_loffset));
295 
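	/*
	 * Convert the block-aligned byte offset into a DEV_BSIZE block
	 * number, translate it with hpfs_hpbmap(), and hand the result
	 * back as a byte offset on the device; dbn == -1 means the block
	 * is not mapped and is reported as NOOFFSET.
	 */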
296 	lbn = ap->a_loffset >> DEV_BSHIFT;
297 	KKASSERT(((int)ap->a_loffset & DEV_BMASK) == 0);
298 
299 	error = hpfs_hpbmap (hp, lbn, &dbn, ap->a_runp);
300 	if (error || dbn == (daddr_t)-1) {
301 		*ap->a_doffsetp = NOOFFSET;
302 	} else {
303 		*ap->a_doffsetp = (off_t)dbn << DEV_BSHIFT;
304 	}
305 	return (error);
306 }
307 
308 /*
309  * hpfs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
310  *	     struct ucred *a_cred)
311  */
312 static int
313 hpfs_read(struct vop_read_args *ap)
314 {
315 	struct vnode *vp = ap->a_vp;
316 	struct hpfsnode *hp = VTOHP(vp);
317 	struct uio *uio = ap->a_uio;
318 	struct buf *bp;
319 	u_int xfersz, toread;
320 	u_int off;
321 	daddr_t lbn, bn;
322 	int resid;
323 	int runl;
324 	int error = 0;
325 
326 	resid = min (uio->uio_resid, hp->h_fn.fn_size - uio->uio_offset);
327 
328 	dprintf(("hpfs_read(0x%x, off: %d resid: %d, segflg: %d): [resid: 0x%x]\n",hp->h_no,(u_int32_t)uio->uio_offset,uio->uio_resid,uio->uio_segflg, resid));
329 
330 	while (resid) {
331 		lbn = uio->uio_offset >> DEV_BSHIFT;
332 		off = uio->uio_offset & (DEV_BSIZE - 1);
333 		dprintf(("hpfs_read: resid: 0x%x lbn: 0x%x off: 0x%x\n",
334 			uio->uio_resid, lbn, off));
335 		error = hpfs_hpbmap(hp, lbn, &bn, &runl);
336 		if (error)
337 			return (error);
338 
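		/*
		 * Clamp the transfer to the contiguous run reported by
		 * hpfs_hpbmap() ((runl + 1) blocks starting at bn) and to
		 * DFLTPHYS, then round the buffer size up to a whole
		 * DEV_BSIZE block.  toread counts from the start of the
		 * block, so the leading off bytes are skipped by uiomove().
		 */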
339 		toread = min(off + resid, min(DFLTPHYS, (runl+1)*DEV_BSIZE));
340 		xfersz = (toread + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
341 		dprintf(("hpfs_read: bn: 0x%x (0x%x) toread: 0x%x (0x%x)\n",
342 			bn, runl, toread, xfersz));
343 
344 		if (toread == 0)
345 			break;
346 
347 		error = bread(hp->h_devvp, dbtodoff(bn), xfersz, &bp);
348 		if (error) {
349 			brelse(bp);
350 			break;
351 		}
352 
353 		error = uiomove(bp->b_data + off, toread - off, uio);
354 		if(error) {
355 			brelse(bp);
356 			break;
357 		}
358 		brelse(bp);
359 		resid -= toread - off;
360 	}
361 	dprintf(("hpfs_read: successful\n"));
362 	return (error);
363 }
364 
365 /*
366  * hpfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
367  *	      struct ucred *a_cred)
368  */
369 static int
370 hpfs_write(struct vop_write_args *ap)
371 {
372 	struct vnode *vp = ap->a_vp;
373 	struct hpfsnode *hp = VTOHP(vp);
374 	struct uio *uio = ap->a_uio;
375 	struct buf *bp;
376 	u_int xfersz, towrite;
377 	u_int off;
378 	daddr_t lbn, bn;
379 	int runl;
380 	int error = 0;
381 
382 	dprintf(("hpfs_write(0x%x, off: %d resid: %d, segflg: %d):\n",hp->h_no,(u_int32_t)uio->uio_offset,uio->uio_resid,uio->uio_segflg));
383 
384 	if (ap->a_ioflag & IO_APPEND) {
385 		dprintf(("hpfs_write: APPEND mode\n"));
386 		uio->uio_offset = hp->h_fn.fn_size;
387 	}
388 	if (uio->uio_offset + uio->uio_resid > hp->h_fn.fn_size) {
389 		error = hpfs_extend (hp, uio->uio_offset + uio->uio_resid);
390 		if (error) {
391 			kprintf("hpfs_write: hpfs_extend FAILED %d\n", error);
392 			return (error);
393 		}
394 	}
395 
396 	while (uio->uio_resid) {
397 		lbn = uio->uio_offset >> DEV_BSHIFT;
398 		off = uio->uio_offset & (DEV_BSIZE - 1);
399 		dprintf(("hpfs_write: resid: 0x%x lbn: 0x%x off: 0x%x\n",
400 			uio->uio_resid, lbn, off));
401 		error = hpfs_hpbmap(hp, lbn, &bn, &runl);
402 		if (error)
403 			return (error);
404 
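		/*
		 * Same clamping as in hpfs_read().  When the write covers
		 * whole blocks (off == 0 and towrite == xfersz) we can
		 * getblk() and clrbuf() without reading the old contents;
		 * otherwise perform a read-modify-write through bread().
		 */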
405 		towrite = min(off + uio->uio_resid, min(DFLTPHYS, (runl+1)*DEV_BSIZE));
406 		xfersz = (towrite + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
407 		dprintf(("hpfs_write: bn: 0x%x (0x%x) towrite: 0x%x (0x%x)\n",
408 			bn, runl, towrite, xfersz));
409 
410 		if ((off == 0) && (towrite == xfersz)) {
411 			bp = getblk(hp->h_devvp, dbtodoff(bn), xfersz, 0, 0);
412 			clrbuf(bp);
413 		} else {
414 			error = bread(hp->h_devvp, dbtodoff(bn), xfersz, &bp);
415 			if (error) {
416 				brelse(bp);
417 				return (error);
418 			}
419 		}
420 
421 		error = uiomove(bp->b_data + off, towrite - off, uio);
422 		if(error) {
423 			brelse(bp);
424 			return (error);
425 		}
426 
427 		if (ap->a_ioflag & IO_SYNC)
428 			bwrite(bp);
429 		else
430 			bawrite(bp);
431 	}
432 
433 	dprintf(("hpfs_write: successful\n"));
434 	return (0);
435 }
436 
437 /*
438  * XXXXX do we need hpfsnode locking inside?
439  *
440  * hpfs_getattr(struct vnode *a_vp, struct vattr *a_vap, struct ucred *a_cred,
441  *		struct proc *a_td)
442  */
443 static int
444 hpfs_getattr(struct vop_getattr_args *ap)
445 {
446 	struct vnode *vp = ap->a_vp;
447 	struct hpfsnode *hp = VTOHP(vp);
448 	struct vattr *vap = ap->a_vap;
449 	int error;
450 
451 	dprintf(("hpfs_getattr(0x%x):\n", hp->h_no));
452 
453 #if defined(__DragonFly__)
454 	vap->va_fsid = dev2udev(hp->h_dev);
455 #else /* defined(__NetBSD__) */
456 	vap->va_fsid = hp->h_dev;
457 #endif
458 	vap->va_fileid = hp->h_no;
459 	vap->va_mode = hp->h_mode;
460 	vap->va_nlink = 1;
461 	vap->va_uid = hp->h_uid;
462 	vap->va_gid = hp->h_gid;
463 	vap->va_rmajor = VNOVAL;
464 	vap->va_rminor = VNOVAL;
465 	vap->va_size = hp->h_fn.fn_size;
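	/*
	 * va_bytes: the file size rounded up to a DEV_BSIZE block, plus
	 * one extra block (presumably to account for the fnode itself).
	 */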
466 	vap->va_bytes = ((hp->h_fn.fn_size + DEV_BSIZE-1) & ~(DEV_BSIZE-1)) +
467 			DEV_BSIZE;
468 
469 	if (!(hp->h_flag & H_PARVALID)) {
470 		error = hpfs_validateparent(hp);
471 		if (error)
472 			return (error);
473 	}
474 	vap->va_atime = hpfstimetounix(hp->h_atime);
475 	vap->va_mtime = hpfstimetounix(hp->h_mtime);
476 	vap->va_ctime = hpfstimetounix(hp->h_ctime);
477 
478 	vap->va_flags = 0;
479 	vap->va_gen = 0;
480 	vap->va_blocksize = DEV_BSIZE;
481 	vap->va_type = vp->v_type;
482 	vap->va_filerev = 0;
483 
484 	return (0);
485 }
486 
487 /*
488  * XXXXX do we need hpfsnode locking inside?
489  *
490  * hpfs_setattr(struct vnode *a_vp, struct vattr *a_vap, struct ucred *a_cred,
491  *		struct thread *a_td)
492  */
493 static int
494 hpfs_setattr(struct vop_setattr_args *ap)
495 {
496 	struct vnode *vp = ap->a_vp;
497 	struct hpfsnode *hp = VTOHP(vp);
498 	struct vattr *vap = ap->a_vap;
499 	struct ucred *cred = ap->a_cred;
500 	int error;
501 
502 	dprintf(("hpfs_setattr(0x%x):\n", hp->h_no));
503 
504 	/*
505 	 * Check for unsettable attributes.
506 	 */
507 	if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
508 	    (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
509 	    (vap->va_blocksize != VNOVAL) || (vap->va_rmajor != VNOVAL) ||
510 	    (vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {
511 		dprintf(("hpfs_setattr: changing nonsettable attr\n"));
512 		return (EINVAL);
513 	}
514 
515 	/* Can't change flags XXX Could be implemented */
516 	if (vap->va_flags != VNOVAL) {
517 		kprintf("hpfs_setattr: FLAGS CANNOT BE SET\n");
518 		return (EINVAL);
519 	}
520 
521 	/* Can't change uid/gid XXX Could be implemented */
522 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
523 		kprintf("hpfs_setattr: UID/GID CANNOT BE SET\n");
524 		return (EINVAL);
525 	}
526 
527 	/* Can't change mode XXX Could be implemented */
528 	if (vap->va_mode != (mode_t)VNOVAL) {
529 		kprintf("hpfs_setattr: MODE CANNOT BE SET\n");
530 		return (EINVAL);
531 	}
532 
533 	/* Update times */
534 	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
535 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
536 			return (EROFS);
537 		if (cred->cr_uid != hp->h_uid &&
538 		    (error = suser_cred(cred, PRISON_ROOT)) &&
539 		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
540 		    (error = VOP_ACCESS(vp, VWRITE, cred))))
541 			return (error);
542 		if (vap->va_atime.tv_sec != VNOVAL)
543 			hp->h_atime = vap->va_atime.tv_sec;
544 		if (vap->va_mtime.tv_sec != VNOVAL)
545 			hp->h_mtime = vap->va_mtime.tv_sec;
546 
547 		hp->h_flag |= H_PARCHANGE;
548 	}
549 
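	/*
	 * Size changes: shrinking truncates the buffer cache first
	 * (vtruncbuf) and then the on-disk allocation (hpfs_truncate);
	 * growing informs the VM object of the new size before extending
	 * the allocation (hpfs_extend).
	 */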
550 	if (vap->va_size != VNOVAL) {
551 		switch (vp->v_type) {
552 		case VDIR:
553 			return (EISDIR);
554 		case VREG:
555 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
556 				return (EROFS);
557 			break;
558 		default:
559 			kprintf("hpfs_setattr: WRONG v_type\n");
560 			return (EINVAL);
561 		}
562 
563 		if (vap->va_size < hp->h_fn.fn_size) {
564 #if defined(__DragonFly__)
565 			error = vtruncbuf(vp, vap->va_size, DEV_BSIZE);
566 			if (error)
567 				return (error);
568 #else /* defined(__NetBSD__) */
569 #error Need an alternative to vtruncbuf()
570 #endif
571 			error = hpfs_truncate(hp, vap->va_size);
572 			if (error)
573 				return (error);
574 
575 		} else if (vap->va_size > hp->h_fn.fn_size) {
576 #if defined(__DragonFly__)
577 			vnode_pager_setsize(vp, vap->va_size);
578 #endif
579 			error = hpfs_extend(hp, vap->va_size);
580 			if (error)
581 				return (error);
582 		}
583 	}
584 
585 	return (0);
586 }
587 
588 /*
589  * Last reference to a node.  If necessary, write or delete it.
590  *
591  * hpfs_inactive(struct vnode *a_vp)
592  */
593 int
594 hpfs_inactive(struct vop_inactive_args *ap)
595 {
596 	struct vnode *vp = ap->a_vp;
597 	struct hpfsnode *hp = VTOHP(vp);
598 	int error;
599 
600 	dprintf(("hpfs_inactive(0x%x): \n", hp->h_no));
601 
602 	if (hp->h_flag & H_CHANGE) {
603 		dprintf(("hpfs_inactive: node changed, update\n"));
604 		error = hpfs_update (hp);
605 		if (error)
606 			return (error);
607 	}
608 
609 	if (hp->h_flag & H_PARCHANGE) {
610 		dprintf(("hpfs_inactive: parent node changed, update\n"));
611 		error = hpfs_updateparent (hp);
612 		if (error)
613 			return (error);
614 	}
615 
616 	if (prtactive && vp->v_sysref.refcnt > 1)
617 		vprint("hpfs_inactive: pushing active", vp);
618 
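	/*
	 * H_INVAL presumably marks a node whose backing file is gone
	 * (e.g. removed); recycle the vnode now so it is reclaimed
	 * rather than lingering in the cache.
	 */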
619 	if (hp->h_flag & H_INVAL) {
620 #if defined(__DragonFly__)
621 		vrecycle(vp);
622 #else /* defined(__NetBSD__) */
623 		vgone(vp);
624 #endif
625 		return (0);
626 	}
627 	return (0);
628 }
629 
630 /*
631  * Reclaim an inode so that it can be used for other purposes.
632  *
633  * hpfs_reclaim(struct vnode *a_vp)
634  */
635 int
636 hpfs_reclaim(struct vop_reclaim_args *ap)
637 {
638 	struct vnode *vp = ap->a_vp;
639 	struct hpfsnode *hp = VTOHP(vp);
640 
641 	dprintf(("hpfs_reclaim(0x%x): \n", hp->h_no));
642 
643 	hpfs_hphashrem(hp);
644 
645 	/* Purge old data structures associated with the inode. */
646 	if (hp->h_devvp) {
647 		vrele(hp->h_devvp);
648 		hp->h_devvp = NULL;
649 	}
650 
651 	vp->v_data = NULL;
652 
653 	FREE(hp, M_HPFSNO);
654 
655 	return (0);
656 }
657 
658 /*
659  * hpfs_print(struct vnode *a_vp)
660  */
661 static int
662 hpfs_print(struct vop_print_args *ap)
663 {
664 	struct vnode *vp = ap->a_vp;
665 	struct hpfsnode *hp = VTOHP(vp);
666 
667 	kprintf("tag VT_HPFS, ino 0x%x",hp->h_no);
668 	lockmgr_printinfo(&vp->v_lock);
669 	kprintf("\n");
670 	return (0);
671 }
672 
673 /*
674  * Calculate the logical to physical mapping if not done already,
675  * then call the device strategy routine.
676  *
677  * In order to be able to swap to a file, the VOP_BMAP operation may not
678  * deadlock on memory.  See hpfs_bmap() for details. XXXXXXX (not impl)
679  *
680  * hpfs_strategy(struct vnode *a_vp, struct bio *a_bio)
681  */
682 int
683 hpfs_strategy(struct vop_strategy_args *ap)
684 {
685 	struct bio *bio = ap->a_bio;
686 	struct bio *nbio;
687 	struct buf *bp = bio->bio_buf;
688 	struct vnode *vp = ap->a_vp;
689 	struct hpfsnode *hp;
690 	int error;
691 
692 	dprintf(("hpfs_strategy(): \n"));
693 
694 	if (vp->v_type == VBLK || vp->v_type == VCHR)
695 		panic("hpfs_strategy: spec");
696 
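	/*
	 * If the pushed BIO has not been translated yet, map the logical
	 * file offset to a device offset via VOP_BMAP().  A translation
	 * of NOOFFSET means a hole: the buffer is simply cleared and the
	 * I/O completed without touching the device.
	 */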
697 	nbio = push_bio(bio);
698 	if (nbio->bio_offset == NOOFFSET) {
699 		error = VOP_BMAP(vp, bio->bio_offset, NULL, &nbio->bio_offset,
700 				 NULL, NULL);
701 		if (error) {
702 			kprintf("hpfs_strategy: VOP_BMAP FAILED %d\n", error);
703 			bp->b_error = error;
704 			bp->b_flags |= B_ERROR;
705 			/* I/O was never started on nbio, must biodone(bio) */
706 			biodone(bio);
707 			return (error);
708 		}
709 		if (nbio->bio_offset == NOOFFSET)
710 			vfs_bio_clrbuf(bp);
711 	}
712 	if (nbio->bio_offset == NOOFFSET) {
713 		/* I/O was never started on nbio, must biodone(bio) */
714 		biodone(bio);
715 		return (0);
716 	}
717 	hp = VTOHP(vp);
718 	vn_strategy(hp->h_devvp, nbio);
719 	return (0);
720 }
721 
722 /*
723  * XXXXX do we need hpfsnode locking inside?
724  *
725  * hpfs_access(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
726  *	       struct proc *a_td)
727  */
728 int
729 hpfs_access(struct vop_access_args *ap)
730 {
731 	struct vnode *vp = ap->a_vp;
732 	struct hpfsnode *hp = VTOHP(vp);
733 	struct ucred *cred = ap->a_cred;
734 	mode_t mask, mode = ap->a_mode;
735 	gid_t *gp;
736 	int i;
737 
738 	dprintf(("hpfs_access(0x%x):\n", hp->h_no));
739 
740 	/*
741 	 * Disallow write attempts on read-only file systems;
742 	 * unless the file is a socket, fifo, or a block or
743 	 * character device resident on the file system.
744 	 */
745 	if (mode & VWRITE) {
746 		switch ((int)vp->v_type) {
747 		case VDIR:
748 		case VLNK:
749 		case VREG:
750 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
751 				return (EROFS);
752 			break;
753 		}
754 	}
755 
756 	/* Otherwise, user id 0 always gets access. */
757 	if (cred->cr_uid == 0)
758 		return (0);
759 
760 	mask = 0;
761 
762 	/* Otherwise, check the owner. */
763 	if (cred->cr_uid == hp->h_uid) {
764 		if (mode & VEXEC)
765 			mask |= S_IXUSR;
766 		if (mode & VREAD)
767 			mask |= S_IRUSR;
768 		if (mode & VWRITE)
769 			mask |= S_IWUSR;
770 		return ((hp->h_mode & mask) == mask ? 0 : EACCES);
771 	}
772 
773 	/* Otherwise, check the groups. */
774 	for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
775 		if (hp->h_gid == *gp) {
776 			if (mode & VEXEC)
777 				mask |= S_IXGRP;
778 			if (mode & VREAD)
779 				mask |= S_IRGRP;
780 			if (mode & VWRITE)
781 				mask |= S_IWGRP;
782 			return ((hp->h_mode & mask) == mask ? 0 : EACCES);
783 		}
784 
785 	/* Otherwise, check everyone else. */
786 	if (mode & VEXEC)
787 		mask |= S_IXOTH;
788 	if (mode & VREAD)
789 		mask |= S_IROTH;
790 	if (mode & VWRITE)
791 		mask |= S_IWOTH;
792 	return ((hp->h_mode & mask) == mask ? 0 : EACCES);
793 }
794 
795 static int
796 hpfs_de_uiomove(int *error, struct hpfsmount *hpmp, struct hpfsdirent *dep,
797 		struct uio *uio)
798 {
799 	char convname[HPFS_MAXFILENAME + 1];
800 	int i, success;
801 
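	/*
	 * Convert the on-disk name byte by byte through the mount's
	 * codepage table (hpfs_d2u) and emit the entry with
	 * vop_write_dirent(); a non-zero return apparently means the
	 * entry no longer fits in the uio, telling the caller to stop.
	 */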
802 	dprintf(("[no: 0x%x, size: %d, name: %2d:%.*s, flag: 0x%x] ",
803 		dep->de_fnode, dep->de_size, dep->de_namelen,
804 		dep->de_namelen, dep->de_name, dep->de_flag));
805 
806 	/*strncpy(cde.d_name, dep->de_name, dep->de_namelen);*/
807 	for (i=0; i<dep->de_namelen; i++)
808 		convname[i] = hpfs_d2u(hpmp, dep->de_name[i]);
809 	convname[dep->de_namelen] = '\0';
810 
811 	success = vop_write_dirent(error, uio, dep->de_fnode,
812 			(dep->de_flag & DE_DIR) ? DT_DIR : DT_REG,
813 			dep->de_namelen, convname);
814 
815 	dprintf(("[0x%x] ", uio->uio_resid));
816 	return (success);
817 }
818 
819 
820 /*
821  * hpfs_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred,
822  *		int *a_ncookies, u_int **cookies)
823  */
824 int
825 hpfs_readdir(struct vop_readdir_args *ap)
826 {
827 	struct vnode *vp = ap->a_vp;
828 	struct hpfsnode *hp = VTOHP(vp);
829 	struct hpfsmount *hpmp = hp->h_hpmp;
830 	struct uio *uio = ap->a_uio;
831 	int ncookies = 0, i, num, cnum;
832 	int error = 0;
833 	struct buf *bp;
834 	struct dirblk *dp;
835 	struct hpfsdirent *dep;
836 	lsn_t olsn;
837 	lsn_t lsn;
838 	int level;
839 
840 	dprintf(("hpfs_readdir(0x%x, 0x%x, 0x%x): ",hp->h_no,(u_int32_t)uio->uio_offset,uio->uio_resid));
841 
842 	/*
843 	 * Since we have to fake up "." and "..", and the rest of the directory
844 	 * structure cannot be expressed as a single off_t either, we simply
845 	 * increment uio_offset by 1 for each entry returned.
846 	 *
847 	 * num is the first entry we need to report,
848 	 * cnum is the entry currently being examined.
849 	 */
850 	if (uio->uio_offset < 0 || uio->uio_offset > INT_MAX)
851 		return(EINVAL);
852 	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
853 		return (error);
854 
855 	num = uio->uio_offset;
856 	cnum = 0;
857 
858 	if( num <= cnum ) {
859 		dprintf((". faked, "));
860 		if (vop_write_dirent(&error, uio, hp->h_no, DT_DIR, 1, "."))
861 			goto done;
862 		if (error)
863 			goto done;
864 		ncookies ++;
865 	}
866 	cnum++;
867 
868 	if( num <= cnum ) {
869 		dprintf((".. faked, "));
870 		if (vop_write_dirent(&error, uio, hp->h_fn.fn_parent, DT_DIR, 2, ".."))
871 			goto readdone;
872 		if (error)
873 			goto done;
874 		ncookies ++;
875 	}
876 	cnum++;
877 
878 	lsn = ((alleaf_t *)hp->h_fn.fn_abd)->al_lsn;
879 
880 	olsn = 0;
881 	level = 1;
882 
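	/*
	 * Iterative walk of the directory B-tree rooted at the fnode:
	 * "dive" follows a DOWN pointer into a child dir block,
	 * "blockdone" climbs back to the parent via d_parent, and olsn
	 * remembers the child block just left so the parent scan can
	 * resume right after it.
	 */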
883 dive:
884 	dprintf(("[dive 0x%x] ", lsn));
885 	error = bread(hp->h_devvp, dbtodoff(lsn), D_BSIZE, &bp);
886 	if (error) {
887 		brelse(bp);
888 		goto done;
889 	}
890 
891 	dp = (struct dirblk *) bp->b_data;
892 	if (dp->d_magic != D_MAGIC) {
893 		kprintf("hpfs_readdir: MAGIC DOESN'T MATCH\n");
894 		brelse(bp);
895 		error = EINVAL;
896 		goto done;
897 	}
898 
899 	dep = D_DIRENT(dp);
900 
901 	if (olsn) {
902 		dprintf(("[restore 0x%x] ", olsn));
903 
904 		while(!(dep->de_flag & DE_END) ) {
905 			if((dep->de_flag & DE_DOWN) &&
906 			   (olsn == DE_DOWNLSN(dep)))
907 					 break;
908 			dep = (hpfsdirent_t *)((caddr_t)dep + dep->de_reclen);
909 		}
910 
911 		if((dep->de_flag & DE_DOWN) && (olsn == DE_DOWNLSN(dep))) {
912 			if (dep->de_flag & DE_END)
913 				goto blockdone;
914 
915 			if (!(dep->de_flag & DE_SPECIAL)) {
916 				if (num <= cnum) {
917 					if (hpfs_de_uiomove(&error, hpmp, dep, uio)) {
918 						brelse(bp);
919 						dprintf(("[resid] "));
920 						goto readdone;
921 					}
922 					if (error) {
923 						brelse (bp);
924 						goto done;
925 					}
926 					ncookies++;
927 				}
928 				cnum++;
929 			}
930 
931 			dep = (hpfsdirent_t *)((caddr_t)dep + dep->de_reclen);
932 		} else {
933 			kprintf("hpfs_readdir: ERROR! oLSN not found\n");
934 			brelse(bp);
935 			error = EINVAL;
936 			goto done;
937 		}
938 	}
939 
940 	olsn = 0;
941 
942 	while(!(dep->de_flag & DE_END)) {
943 		if(dep->de_flag & DE_DOWN) {
944 			lsn = DE_DOWNLSN(dep);
945 			brelse(bp);
946 			level++;
947 			goto dive;
948 		}
949 
950 		if (!(dep->de_flag & DE_SPECIAL)) {
951 			if (num <= cnum) {
952 				if (hpfs_de_uiomove(&error, hpmp, dep, uio)) {
953 					brelse(bp);
954 					dprintf(("[resid] "));
955 					goto readdone;
956 				}
957 				if (error) {
958 					brelse (bp);
959 					goto done;
960 				}
961 				ncookies++;
962 			}
963 			cnum++;
964 		}
965 
966 		dep = (hpfsdirent_t *)((caddr_t)dep + dep->de_reclen);
967 	}
968 
969 	if(dep->de_flag & DE_DOWN) {
970 		dprintf(("[enddive] "));
971 		lsn = DE_DOWNLSN(dep);
972 		brelse(bp);
973 		level++;
974 		goto dive;
975 	}
976 
977 blockdone:
978 	dprintf(("[EOB] "));
979 	olsn = lsn;
980 	lsn = dp->d_parent;
981 	brelse(bp);
982 	level--;
983 
984 	dprintf(("[level %d] ", level));
985 
986 	if (level > 0)
987 		goto dive;	/* undive really */
988 
989 	if (ap->a_eofflag) {
990 	    dprintf(("[EOF] "));
991 	    *ap->a_eofflag = 1;
992 	}
993 
994 readdone:
995 	uio->uio_offset = cnum;
996 	dprintf(("[readdone]\n"));
997 	if (!error && ap->a_ncookies != NULL) {
998 #if defined(__DragonFly__)
999 		u_long *cookies;
1000 		u_long *cookiep;
1001 #else /* defined(__NetBSD__) */
1002 		off_t *cookies;
1003 		off_t *cookiep;
1004 #endif
1005 
1006 		dprintf(("%d cookies, ",ncookies));
1007 		if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1008 			panic("hpfs_readdir: unexpected uio from NFS server");
1009 #if defined(__DragonFly__)
1010 		MALLOC(cookies, u_long *, ncookies * sizeof(u_long),
1011 		       M_TEMP, M_WAITOK);
1012 #else /* defined(__NetBSD__) */
1013 		MALLOC(cookies, off_t *, ncookies * sizeof(off_t),
1014 		       M_TEMP, M_WAITOK);
1015 #endif
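		/*
		 * Cookie values are just the successive fake offsets
		 * (num + 1, num + 2, ...), matching the one-per-entry
		 * uio_offset scheme described at the top of this function.
		 */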
1016 		for (cookiep = cookies, i=0; i < ncookies; i++)
1017 			*cookiep++ = (u_int)++num;
1018 
1019 		*ap->a_ncookies = ncookies;
1020 		*ap->a_cookies = cookies;
1021 	}
1022 
1023 done:
1024 	vn_unlock(ap->a_vp);
1025 	return (error);
1026 }
1027 
1028 /*
1029  * hpfs_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
1030  *		struct componentname *a_cnp)
1031  */
1032 int
1033 hpfs_lookup(struct vop_old_lookup_args *ap)
1034 {
1035 	struct vnode *dvp = ap->a_dvp;
1036 	struct hpfsnode *dhp = VTOHP(dvp);
1037 	struct hpfsmount *hpmp = dhp->h_hpmp;
1038 	struct componentname *cnp = ap->a_cnp;
1039 	struct ucred *cred = cnp->cn_cred;
1040 	int error;
1041 	int nameiop = cnp->cn_nameiop;
1042 	int flags = cnp->cn_flags;
1043 	int lockparent = flags & CNP_LOCKPARENT;
1044 #if HPFS_DEBUG
1045 	int wantparent = flags & (CNP_LOCKPARENT | CNP_WANTPARENT);
1046 #endif
1047 	dprintf(("hpfs_lookup(0x%x, %s, %ld, %d, %d): \n",
1048 		dhp->h_no, cnp->cn_nameptr, cnp->cn_namelen,
1049 		lockparent, wantparent));
1050 
1051 	if (nameiop != NAMEI_CREATE && nameiop != NAMEI_DELETE && nameiop != NAMEI_LOOKUP) {
1052 		kprintf("hpfs_lookup: only LOOKUP, DELETE and CREATE are supported\n");
1053 		return (EOPNOTSUPP);
1054 	}
1055 
1056 	error = VOP_ACCESS(dvp, VEXEC, cred);
1057 	if(error)
1058 		return (error);
1059 
1060 	if( (cnp->cn_namelen == 1) &&
1061 	    !strncmp(cnp->cn_nameptr,".",1) ) {
1062 		dprintf(("hpfs_lookup(0x%x,...): . faked\n",dhp->h_no));
1063 
1064 		vref(dvp);
1065 		*ap->a_vpp = dvp;
1066 
1067 		return (0);
1068 	} else if( (cnp->cn_namelen == 2) &&
1069 	    !strncmp(cnp->cn_nameptr,"..",2) && (flags & CNP_ISDOTDOT) ) {
1070 		dprintf(("hpfs_lookup(0x%x,...): .. faked (0x%x)\n",
1071 			dhp->h_no, dhp->h_fn.fn_parent));
1072 
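		/*
		 * For ".." the directory is unlocked before fetching its
		 * parent to avoid a child-before-parent lock order; dvp is
		 * re-locked on failure, and on success only when
		 * LOCKPARENT was requested.
		 */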
1073 		VOP__UNLOCK(dvp, 0);
1074 
1075 		error = VFS_VGET(hpmp->hpm_mp,
1076 				 dhp->h_fn.fn_parent, ap->a_vpp);
1077 		if (error) {
1078 			VOP__LOCK(dvp, 0);
1079 			return(error);
1080 		}
1081 
1082 		if (lockparent && (error = VOP__LOCK(dvp, 0))) {
1083 			vput( *(ap->a_vpp) );
1084 			return (error);
1085 		}
1086 		return (error);
1087 	} else {
1088 		struct buf *bp;
1089 		struct hpfsdirent *dep;
1090 		struct hpfsnode *hp;
1091 
1092 		error = hpfs_genlookupbyname(dhp,
1093 				cnp->cn_nameptr, cnp->cn_namelen, &bp, &dep);
1094 		if (error) {
1095 			if (error == ENOENT &&
1096 			    (nameiop == NAMEI_CREATE || nameiop == NAMEI_RENAME)) {
1097 				if(!lockparent)
1098 					VOP__UNLOCK(dvp, 0);
1099 				return (EJUSTRETURN);
1100 			}
1101 
1102 			return (error);
1103 		}
1104 
1105 		dprintf(("hpfs_lookup: fnode: 0x%x, CPID: 0x%x\n",
1106 			 dep->de_fnode, dep->de_cpid));
1107 
1108 		if (nameiop == NAMEI_DELETE) {
1109 			error = VOP_ACCESS(dvp, VWRITE, cred);
1110 			if (error) {
1111 				brelse(bp);
1112 				return (error);
1113 			}
1114 		}
1115 
1116 		if (dhp->h_no == dep->de_fnode) {
1117 			brelse(bp);
1118 			vref(dvp);
1119 			*ap->a_vpp = dvp;
1120 			return (0);
1121 		}
1122 
1123 		error = VFS_VGET(hpmp->hpm_mp, dep->de_fnode, ap->a_vpp);
1124 		if (error) {
1125 			kprintf("hpfs_lookup: VFS_VGET FAILED %d\n", error);
1126 			brelse(bp);
1127 			return(error);
1128 		}
1129 
1130 		hp = VTOHP(*ap->a_vpp);
1131 
1132 		hp->h_mtime = dep->de_mtime;
1133 		hp->h_ctime = dep->de_ctime;
1134 		hp->h_atime = dep->de_atime;
1135 		bcopy(dep->de_name, hp->h_name, dep->de_namelen);
1136 		hp->h_name[dep->de_namelen] = '\0';
1137 		hp->h_namelen = dep->de_namelen;
1138 		hp->h_flag |= H_PARVALID;
1139 
1140 		brelse(bp);
1141 
1142 		if(!lockparent)
1143 			VOP__UNLOCK(dvp, 0);
1144 	}
1145 	return (error);
1146 }
1147 
1148 /*
1149  * hpfs_remove(struct vnode *a_dvp, struct vnode *a_vp,
1150  *		struct componentname *a_cnp)
1151  */
1152 int
1153 hpfs_remove(struct vop_old_remove_args *ap)
1154 {
1155 	int error;
1156 
1157 	dprintf(("hpfs_remove(0x%x, %s, %ld): \n", VTOHP(ap->a_vp)->h_no,
1158 		ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen));
1159 
1160 	if (ap->a_vp->v_type == VDIR)
1161 		return (EPERM);
1162 
1163 	error = hpfs_removefnode (ap->a_dvp, ap->a_vp, ap->a_cnp);
1164 	return (error);
1165 }
1166 
1167 /*
1168  * hpfs_create(struct vnode *a_dvp, struct vnode **a_vpp,
1169  *		struct componentname *a_cnp, struct vattr *a_vap)
1170  */
1171 int
1172 hpfs_create(struct vop_old_create_args *ap)
1173 {
1174 	int error;
1175 
1176 	dprintf(("hpfs_create(0x%x, %s, %ld): \n", VTOHP(ap->a_dvp)->h_no,
1177 		ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen));
1178 
1179 	error = hpfs_makefnode (ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1180 
1181 	return (error);
1182 }
1183 
1184 /*
1185  * Return POSIX pathconf information applicable to the HPFS filesystem
1186  *
1187  * hpfs_pathconf(struct vnode *a_vp, int a_name, register_t *a_retval)
1188  */
1189 int
1190 hpfs_pathconf(struct vop_pathconf_args *ap)
1191 {
1192 	switch (ap->a_name) {
1193 	case _PC_LINK_MAX:
1194 		*ap->a_retval = 1;
1195 		return (0);
1196 	case _PC_NAME_MAX:
1197 		*ap->a_retval = HPFS_MAXFILENAME;
1198 		return (0);
1199 	case _PC_PATH_MAX:
1200 		*ap->a_retval = PATH_MAX;
1201 		return (0);
1202 	case _PC_CHOWN_RESTRICTED:
1203 		*ap->a_retval = 1;
1204 		return (0);
1205 	case _PC_NO_TRUNC:
1206 		*ap->a_retval = 0;
1207 		return (0);
1208 #if defined(__NetBSD__)
1209 	case _PC_SYNC_IO:
1210 		*ap->a_retval = 1;
1211 		return (0);
1212 	case _PC_FILESIZEBITS:
1213 		*ap->a_retval = 32;
1214 		return (0);
1215 #endif
1216 	default:
1217 		return (EINVAL);
1218 	}
1219 	/* NOTREACHED */
1220 }
1221 
1222 
1223 /*
1224  * Global vfs data structures
1225  */
1226 
1227 struct vop_ops hpfs_vnode_vops = {
1228 	.vop_default =		vop_defaultop,
1229 	.vop_getattr =		hpfs_getattr,
1230 	.vop_setattr =		hpfs_setattr,
1231 	.vop_inactive =		hpfs_inactive,
1232 	.vop_reclaim =		hpfs_reclaim,
1233 	.vop_print =		hpfs_print,
1234 	.vop_old_create =	hpfs_create,
1235 	.vop_old_remove =	hpfs_remove,
1236 	.vop_old_lookup =	hpfs_lookup,
1237 	.vop_access =		hpfs_access,
1238 	.vop_readdir =		hpfs_readdir,
1239 	.vop_fsync =		hpfs_fsync,
1240 	.vop_bmap =		hpfs_bmap,
1241 	.vop_getpages =		hpfs_getpages,
1242 	.vop_putpages =		hpfs_putpages,
1243 	.vop_strategy =		hpfs_strategy,
1244 	.vop_read =		hpfs_read,
1245 	.vop_write =		hpfs_write,
1246 	.vop_ioctl =		hpfs_ioctl,
1247 	.vop_pathconf =		hpfs_pathconf
1248 };
1249 
1250