/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)mfs_vnops.c	7.19 (Berkeley) 12/05/90
 */

#include "param.h"
#include "time.h"
#include "kernel.h"
#include "proc.h"
#include "user.h"
#include "buf.h"
#include "errno.h"
#include "map.h"
#include "vnode.h"
#include "../ufs/mfsnode.h"
#include "../ufs/mfsiom.h"
#include "machine/vmparam.h"
#include "machine/mtpr.h"

#if !defined(hp300) && !defined(i386)
/*
 * Resource map of pte's used by mfs_doio() to map physical I/O
 * requests into kernel virtual space on the vax/tahoe ports.
 */
static int mfsmap_want;		/* 1 => need kernel I/O resources */
struct map mfsmap[MFS_MAPSIZE];
extern char mfsiobuf[];
#endif

/*
 * mfs vnode operations.
 *
 * Only open, close, ioctl, inactive, bmap, strategy and print do real
 * work; everything else is either a hard error (mfs_badop) or a
 * successful no-op (mfs_nullop).
 */
int	mfs_open(),
	mfs_strategy(),
	mfs_bmap(),
	mfs_ioctl(),
	mfs_close(),
	mfs_inactive(),
	mfs_print(),
	mfs_badop(),
	mfs_nullop();

struct vnodeops mfs_vnodeops = {
	mfs_badop,		/* lookup */
	mfs_badop,		/* create */
	mfs_badop,		/* mknod */
	mfs_open,		/* open */
	mfs_close,		/* close */
	mfs_badop,		/* access */
	mfs_badop,		/* getattr */
	mfs_badop,		/* setattr */
	mfs_badop,		/* read */
	mfs_badop,		/* write */
	mfs_ioctl,		/* ioctl */
	mfs_badop,		/* select */
	mfs_badop,		/* mmap */
	mfs_badop,		/* fsync */
	mfs_badop,		/* seek */
	mfs_badop,		/* remove */
	mfs_badop,		/* link */
	mfs_badop,		/* rename */
	mfs_badop,		/* mkdir */
	mfs_badop,		/* rmdir */
	mfs_badop,		/* symlink */
	mfs_badop,		/* readdir */
	mfs_badop,		/* readlink */
	mfs_badop,		/* abortop */
	mfs_inactive,		/* inactive */
	mfs_nullop,		/* reclaim */
	mfs_nullop,		/* lock */
	mfs_nullop,		/* unlock */
	mfs_bmap,		/* bmap */
	mfs_strategy,		/* strategy */
	mfs_print,		/* print */
	mfs_nullop,		/* islocked */
};

/*
 * Vnode Operations.
 *
 * Open called to allow memory filesystem to initialize and
 * validate before actual IO. Record our process identifier
 * so we can tell when we are doing I/O to ourself.
84 */ 85 /* ARGSUSED */ 86 mfs_open(vp, mode, cred) 87 register struct vnode *vp; 88 int mode; 89 struct ucred *cred; 90 { 91 92 if (vp->v_type != VBLK) { 93 panic("mfs_ioctl not VBLK"); 94 /* NOTREACHED */ 95 } 96 return (0); 97 } 98 99 /* 100 * Ioctl operation. 101 */ 102 /* ARGSUSED */ 103 mfs_ioctl(vp, com, data, fflag, cred) 104 struct vnode *vp; 105 int com; 106 caddr_t data; 107 int fflag; 108 struct ucred *cred; 109 { 110 111 return (-1); 112 } 113 114 /* 115 * Pass I/O requests to the memory filesystem process. 116 */ 117 mfs_strategy(bp) 118 register struct buf *bp; 119 { 120 register struct mfsnode *mfsp; 121 struct vnode *vp; 122 123 if (vfinddev(bp->b_dev, VBLK, &vp) || vp->v_usecount == 0) 124 panic("mfs_strategy: bad dev"); 125 mfsp = VTOMFS(vp); 126 if (mfsp->mfs_pid == u.u_procp->p_pid) { 127 mfs_doio(bp, mfsp->mfs_baseoff); 128 } else { 129 bp->av_forw = mfsp->mfs_buflist; 130 mfsp->mfs_buflist = bp; 131 wakeup((caddr_t)vp); 132 } 133 return (0); 134 } 135 136 #if defined(vax) || defined(tahoe) 137 /* 138 * Memory file system I/O. 139 * 140 * Essentially play ubasetup() and disk interrupt service routine by 141 * doing the copies to or from the memfs process. If doing physio 142 * (i.e. pagein), we must map the I/O through the kernel virtual 143 * address space. 144 */ 145 mfs_doio(bp, base) 146 register struct buf *bp; 147 caddr_t base; 148 { 149 register struct pte *pte, *ppte; 150 register caddr_t vaddr; 151 int off, npf, npf2, reg; 152 caddr_t kernaddr, offset; 153 154 /* 155 * For phys I/O, map the b_addr into kernel virtual space using 156 * the Mfsiomap pte's. 
 */
	if ((bp->b_flags & B_PHYS) == 0) {
		/* Ordinary buffer-cache I/O: data already at a kernel address. */
		kernaddr = bp->b_un.b_addr;
	} else {
		if (bp->b_flags & (B_PAGET | B_UAREA | B_DIRTY))
			panic("swap on memfs?");
		/* Page offset and page-frame count spanned by the request. */
		off = (int)bp->b_un.b_addr & PGOFSET;
		npf = btoc(bp->b_bcount + off);
		/*
		 * Get some mapping page table entries; sleep until a
		 * run of npf entries becomes available.
		 */
		while ((reg = rmalloc(mfsmap, (long)npf)) == 0) {
			mfsmap_want++;
			sleep((caddr_t)&mfsmap_want, PZERO-1);
		}
		reg--;
		pte = vtopte(bp->b_proc, btop(bp->b_un.b_addr));
		/*
		 * Do vmaccess() but with the Mfsiomap page table.
		 */
		ppte = &Mfsiomap[reg];
		vaddr = &mfsiobuf[reg * NBPG];
		kernaddr = vaddr + off;
		for (npf2 = npf; npf2; npf2--) {
			mapin(ppte, (u_int)vaddr, pte->pg_pfnum,
				(int)(PG_V|PG_KW));
#if defined(tahoe)
			/* Tahoe: clear the P1 data cache before device writes. */
			if ((bp->b_flags & B_READ) == 0)
				mtpr(P1DC, vaddr);
#endif
			ppte++;
			pte++;
			vaddr += NBPG;
		}
	}
	/* Byte offset of this block within the filesystem image. */
	offset = base + (bp->b_blkno << DEV_BSHIFT);
	if (bp->b_flags & B_READ)
		bp->b_error = copyin(offset, kernaddr, bp->b_bcount);
	else
		bp->b_error = copyout(kernaddr, offset, bp->b_bcount);
	if (bp->b_error)
		bp->b_flags |= B_ERROR;
	/*
	 * Release pte's used by physical I/O.
	 */
	if (bp->b_flags & B_PHYS) {
		rmfree(mfsmap, (long)npf, (long)++reg);
		if (mfsmap_want) {
			mfsmap_want = 0;
			wakeup((caddr_t)&mfsmap_want);
		}
	}
	biodone(bp);
}
#endif /* vax || tahoe */

#if defined(hp300) || defined(i386)
/*
 * Memory file system I/O.
 *
 * Trivial on the HP since the buffer has already been mapped into KVA space.
 */
mfs_doio(bp, base)
	register struct buf *bp;
	caddr_t base;
{
	/* Byte offset of this block within the filesystem image. */
	base += (bp->b_blkno << DEV_BSHIFT);
	if (bp->b_flags & B_READ)
		bp->b_error = copyin(base, bp->b_un.b_addr, bp->b_bcount);
	else
		bp->b_error = copyout(bp->b_un.b_addr, base, bp->b_bcount);
	if (bp->b_error)
		bp->b_flags |= B_ERROR;
	biodone(bp);
}
#endif

/*
 * This is a noop, simply returning what one has been given.
 */
mfs_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * Memory filesystem close routine
 */
/* ARGSUSED */
mfs_close(vp, flag, cred)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
{
	register struct mfsnode *mfsp = VTOMFS(vp);
	register struct buf *bp;

	/*
	 * Finish any pending I/O requests.
	 */
	while (bp = mfsp->mfs_buflist) {
		mfsp->mfs_buflist = bp->av_forw;
		mfs_doio(bp, mfsp->mfs_baseoff);
		wakeup((caddr_t)bp);
	}
	/*
	 * On last close of a memory filesystem
	 * we must invalidate any in core blocks, so that
	 * we can free up its vnode.
	 */
	vflushbuf(vp, 0);
	if (vinvalbuf(vp, 1))
		return (0);
	/*
	 * There should be no way to have any more uses of this
	 * vnode, so if we find any other uses, it is a panic.
	 */
	if (vp->v_usecount > 1)
		printf("mfs_close: ref count %d > 1\n", vp->v_usecount);
	if (vp->v_usecount > 1 || mfsp->mfs_buflist)
		panic("mfs_close");
	/*
	 * Send a request to the filesystem server to exit.
289 */ 290 mfsp->mfs_buflist = (struct buf *)(-1); 291 wakeup((caddr_t)vp); 292 return (0); 293 } 294 295 /* 296 * Memory filesystem inactive routine 297 */ 298 /* ARGSUSED */ 299 mfs_inactive(vp) 300 struct vnode *vp; 301 { 302 303 if (VTOMFS(vp)->mfs_buflist != (struct buf *)(-1)) 304 panic("mfs_inactive: not inactive"); 305 return (0); 306 } 307 308 /* 309 * Print out the contents of an mfsnode. 310 */ 311 mfs_print(vp) 312 struct vnode *vp; 313 { 314 register struct mfsnode *mfsp = VTOMFS(vp); 315 316 printf("tag VT_MFS, pid %d, base %d, size %d\n", mfsp->mfs_pid, 317 mfsp->mfs_baseoff, mfsp->mfs_size); 318 } 319 320 /* 321 * Block device bad operation 322 */ 323 mfs_badop() 324 { 325 326 panic("mfs_badop called\n"); 327 /* NOTREACHED */ 328 } 329 330 /* 331 * Block device null operation 332 */ 333 mfs_nullop() 334 { 335 336 return (0); 337 } 338 339 /* 340 * Memory based filesystem initialization. 341 */ 342 mfs_init() 343 { 344 345 #if !defined(hp300) && !defined(i386) 346 rminit(mfsmap, (long)MFS_MAPREG, (long)1, "mfs mapreg", MFS_MAPSIZE); 347 #endif 348 } 349