1 /*- 2 * Copyright (c) 2019 Tomohiro Kusumi <tkusumi@netbsd.org> 3 * Copyright (c) 2019 The DragonFly Project 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 */ 27 28 #include "fuse.h" 29 30 static MALLOC_DEFINE(M_FUSE_NODE, "fuse_node", "FUSE node"); 31 32 static struct objcache *fuse_node_objcache = NULL; 33 static struct objcache_malloc_args fuse_node_args = { 34 sizeof(struct fuse_node), M_FUSE_NODE, 35 }; 36 37 static int 38 fuse_node_cmp(struct fuse_node *p1, struct fuse_node *p2) 39 { 40 if (p1->ino < p2->ino) 41 return -1; 42 if (p1->ino > p2->ino) 43 return 1; 44 return 0; 45 } 46 47 RB_PROTOTYPE2(fuse_node_tree, fuse_node, entry, fuse_node_cmp, uint64_t); 48 RB_GENERATE2(fuse_node_tree, fuse_node, node_entry, fuse_node_cmp, 49 uint64_t, ino); 50 51 void 52 fuse_node_new(struct fuse_mount *fmp, uint64_t ino, enum vtype vtyp, 53 struct fuse_node **fnpp) 54 { 55 struct fuse_node *fnp; 56 57 fnp = objcache_get(fuse_node_objcache, M_WAITOK); 58 bzero(fnp, sizeof(*fnp)); 59 60 fnp->fmp = fmp; 61 62 mtx_init(&fnp->node_lock, "fuse_node_lock"); 63 64 fnp->ino = ino; 65 fnp->type = vtyp; 66 fnp->size = 0; 67 fnp->nlookup = 0; 68 fnp->fh = 0; 69 fnp->closed = false; 70 71 if (RB_INSERT(fuse_node_tree, &fmp->node_head, fnp)) { 72 panic("fuse_node_new: cannot insert %p\n", fnp); 73 } 74 75 *fnpp = fnp; 76 } 77 78 void 79 fuse_node_free(struct fuse_mount *fmp, struct fuse_node *fnp) 80 { 81 fuse_dbg("free ino=%ju\n", fnp->ino); 82 83 mtx_lock(&fmp->ino_lock); 84 RB_REMOVE(fuse_node_tree, &fmp->node_head, fnp); 85 mtx_unlock(&fmp->ino_lock); 86 87 objcache_put(fuse_node_objcache, fnp); 88 } 89 90 /* 91 * Allocate or find the fuse node for the specified inode number and assign 92 * its vnode. 
 */
int
fuse_alloc_node(struct fuse_mount *fmp, struct fuse_node *dfnp,
    uint64_t ino, enum vtype vtyp, struct vnode **vpp)
{
	struct fuse_node *fnp;
	int error;
	int allocated = 0;

	/* Only called for entries being created/looked up under a directory. */
	KKASSERT(dfnp->type == VDIR);
	/* Device and fifo nodes are not supported here. */
	if (vtyp == VBLK || vtyp == VCHR || vtyp == VFIFO)
		return EINVAL;

	/*
	 * ino_lock makes the lookup-then-create sequence atomic with
	 * respect to concurrent lookups of the same inode number, so at
	 * most one fuse_node exists per ino.
	 */
	mtx_lock(&fmp->ino_lock);
	fnp = RB_LOOKUP(fuse_node_tree, &fmp->node_head, ino);
	if (fnp == NULL) {
		fuse_node_new(fmp, ino, vtyp, &fnp);
		allocated = 1;
	}
	mtx_unlock(&fmp->ino_lock);

	error = fuse_node_vn(fnp, vpp);
	if (error) {
		/* Undo only a node created by this call, not a found one. */
		if (allocated)
			fuse_node_free(fmp, fnp);
	}
	return error;
}

/*
 * Return the vnode associated with fnp in *vpp, allocating and attaching
 * a new vnode if the node has none.
 *
 * Returns exclusively locked vp
 */
int
fuse_node_vn(struct fuse_node *fnp, struct vnode **vpp)
{
	struct mount *mp = fnp->fmp->mp;
	struct vnode *vp;
	struct vnode *newvp;
	int error;

	newvp = NULL;
retry:
	error = 0;
	/*
	 * Speculatively allocate a fresh vnode outside the node lock.
	 * It may turn out to be unneeded if another thread attaches a
	 * vnode first; it is then discarded below.
	 */
	if (fnp->vp == NULL && newvp == NULL) {
		error = getnewvnode(VT_FUSE, mp, &newvp,
		    VLKTIMEOUT, LK_CANRECURSE);
		if (error)
			return error;
	}

	mtx_lock(&fnp->node_lock);

	/*
	 * Check case where vp is already assigned
	 */
	vp = fnp->vp;
	if (vp) {
		/*
		 * vhold keeps vp from being freed while we drop the node
		 * lock to vget it (vget may sleep; holding node_lock
		 * across it could deadlock).
		 */
		vhold(vp);
		mtx_unlock(&fnp->node_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_RETRY);
		vdrop(vp);

		if (error)
			goto retry;
		/*
		 * vp may have been reclaimed and disassociated from fnp
		 * while unlocked; re-validate before returning it.
		 */
		if (fnp->vp != vp) {
			vput(vp);
			goto retry;
		}

		*vpp = vp;

		/* Discard the speculative vnode allocated above, if any. */
		if (newvp) {
			newvp->v_type = VBAD;
			vx_put(newvp);
		}

		return 0;
	}

	/*
	 * Assign new vp, release the node lock
	 */
	if (newvp == NULL) {
		/* No vnode attached and none preallocated: go allocate one. */
		mtx_unlock(&fnp->node_lock);
		goto retry;
	}

	fnp->vp = newvp;
	mtx_unlock(&fnp->node_lock);
	vp = newvp;

	/*
	 * Finish setting up vp (vp is held exclusively + vx)
	 */
	vp->v_type = fnp->type;
	vp->v_data = fnp;

	switch (vp->v_type) {
	case VREG:
		/* Regular files get a VM object for buffer cache I/O. */
		vinitvmio(vp, fnp->size, FUSE_BLKSIZE, -1);
		break;
	case VDIR:
		break;
	case VBLK:
	case VCHR:
		/*
		 * Unreachable: fuse_alloc_node() rejects VBLK/VCHR with
		 * EINVAL before getting here.
		 */
		KKASSERT(0);
		vp->v_ops = &mp->mnt_vn_spec_ops;
		addaliasu(vp, umajor(0), uminor(0)); /* XXX CUSE */
		break;
	case VLNK:
		break;
	case VSOCK:
		break;
	case VFIFO:
		/* Unreachable: filtered out by fuse_alloc_node() as well. */
		KKASSERT(0);
		/* fallthrough */
	case VDATABASE:
		break;
	default:
		KKASSERT(0);
	}

	vx_downgrade(vp); /* VX to normal, is still exclusive */

	*vpp = vp;

	return error;
}

/*
 * Update the node's cached size to newsize and resize the vnode's buffer
 * cache extent to match.  Returns the nvtruncbuf/nvextendbuf error code.
 */
int
fuse_node_truncate(struct fuse_node *fnp, size_t oldsize, size_t newsize)
{
	struct vnode *vp = fnp->vp;
	int error;

	fuse_dbg("ino=%ju update size %ju -> %ju\n",
	    fnp->ino, oldsize, newsize);

	fnp->attr.va_size = fnp->size = newsize;

	if (newsize < oldsize)
		error = nvtruncbuf(vp, newsize, FUSE_BLKSIZE, -1, 0);
	else
		error = nvextendbuf(vp, oldsize, newsize, FUSE_BLKSIZE,
		    FUSE_BLKSIZE, -1, -1, 0);
	return error;
}

/*
 * Create the global fuse_node objcache.  Called once at module load.
 */
void
fuse_node_init(void)
{
	fuse_node_objcache = objcache_create("fuse_node", 0, 0,
	    NULL, NULL, NULL,
	    objcache_malloc_alloc_zero, objcache_malloc_free, &fuse_node_args);
}

/*
 * Tear down the global fuse_node objcache.  Called once at module unload.
 */
void
fuse_node_cleanup(void)
{
	objcache_destroy(fuse_node_objcache);
}