1*eda14cbcSMatt Macy /* 2*eda14cbcSMatt Macy * Copyright (c) 2020 iXsystems, Inc. 3*eda14cbcSMatt Macy * All rights reserved. 4*eda14cbcSMatt Macy * 5*eda14cbcSMatt Macy * Redistribution and use in source and binary forms, with or without 6*eda14cbcSMatt Macy * modification, are permitted provided that the following conditions 7*eda14cbcSMatt Macy * are met: 8*eda14cbcSMatt Macy * 1. Redistributions of source code must retain the above copyright 9*eda14cbcSMatt Macy * notice, this list of conditions and the following disclaimer. 10*eda14cbcSMatt Macy * 2. Redistributions in binary form must reproduce the above copyright 11*eda14cbcSMatt Macy * notice, this list of conditions and the following disclaimer in the 12*eda14cbcSMatt Macy * documentation and/or other materials provided with the distribution. 13*eda14cbcSMatt Macy * 14*eda14cbcSMatt Macy * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15*eda14cbcSMatt Macy * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16*eda14cbcSMatt Macy * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17*eda14cbcSMatt Macy * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18*eda14cbcSMatt Macy * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19*eda14cbcSMatt Macy * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20*eda14cbcSMatt Macy * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21*eda14cbcSMatt Macy * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22*eda14cbcSMatt Macy * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23*eda14cbcSMatt Macy * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24*eda14cbcSMatt Macy * SUCH DAMAGE. 
25*eda14cbcSMatt Macy * 26*eda14cbcSMatt Macy */ 27*eda14cbcSMatt Macy 28*eda14cbcSMatt Macy #include <sys/cdefs.h> 29*eda14cbcSMatt Macy __FBSDID("$FreeBSD$"); 30*eda14cbcSMatt Macy 31*eda14cbcSMatt Macy #include <sys/types.h> 32*eda14cbcSMatt Macy #include <sys/param.h> 33*eda14cbcSMatt Macy #include <sys/dmu.h> 34*eda14cbcSMatt Macy #include <sys/dmu_impl.h> 35*eda14cbcSMatt Macy #include <sys/dmu_tx.h> 36*eda14cbcSMatt Macy #include <sys/dbuf.h> 37*eda14cbcSMatt Macy #include <sys/dnode.h> 38*eda14cbcSMatt Macy #include <sys/zfs_context.h> 39*eda14cbcSMatt Macy #include <sys/dmu_objset.h> 40*eda14cbcSMatt Macy #include <sys/dmu_traverse.h> 41*eda14cbcSMatt Macy #include <sys/dsl_dataset.h> 42*eda14cbcSMatt Macy #include <sys/dsl_dir.h> 43*eda14cbcSMatt Macy #include <sys/dsl_pool.h> 44*eda14cbcSMatt Macy #include <sys/dsl_synctask.h> 45*eda14cbcSMatt Macy #include <sys/dsl_prop.h> 46*eda14cbcSMatt Macy #include <sys/dmu_zfetch.h> 47*eda14cbcSMatt Macy #include <sys/zfs_ioctl.h> 48*eda14cbcSMatt Macy #include <sys/zap.h> 49*eda14cbcSMatt Macy #include <sys/zio_checksum.h> 50*eda14cbcSMatt Macy #include <sys/zio_compress.h> 51*eda14cbcSMatt Macy #include <sys/sa.h> 52*eda14cbcSMatt Macy #include <sys/zfeature.h> 53*eda14cbcSMatt Macy #include <sys/abd.h> 54*eda14cbcSMatt Macy #include <sys/zfs_rlock.h> 55*eda14cbcSMatt Macy #include <sys/racct.h> 56*eda14cbcSMatt Macy #include <sys/vm.h> 57*eda14cbcSMatt Macy #include <sys/zfs_znode.h> 58*eda14cbcSMatt Macy #include <sys/zfs_vnops.h> 59*eda14cbcSMatt Macy 60*eda14cbcSMatt Macy #include <sys/ccompat.h> 61*eda14cbcSMatt Macy 62*eda14cbcSMatt Macy #ifndef IDX_TO_OFF 63*eda14cbcSMatt Macy #define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT) 64*eda14cbcSMatt Macy #endif 65*eda14cbcSMatt Macy 66*eda14cbcSMatt Macy #if __FreeBSD_version < 1300051 67*eda14cbcSMatt Macy #define VM_ALLOC_BUSY_FLAGS VM_ALLOC_NOBUSY 68*eda14cbcSMatt Macy #else 69*eda14cbcSMatt Macy #define VM_ALLOC_BUSY_FLAGS VM_ALLOC_SBUSY | 
VM_ALLOC_IGN_SBUSY
#endif

/*
 * Prior to FreeBSD 1300072 the page lock had to be taken around page
 * queue manipulation (vm_page_activate/deactivate); on newer kernels
 * these are safe without it, so the wrappers become no-ops.
 */
#if __FreeBSD_version < 1300072
#define	dmu_page_lock(m)	vm_page_lock(m)
#define	dmu_page_unlock(m)	vm_page_unlock(m)
#else
#define	dmu_page_lock(m)
#define	dmu_page_unlock(m)
#endif

/*
 * Hold an array of dbufs spanning [offset, offset + length) of the given
 * object.  Thin convenience wrapper around dmu_buf_hold_array_by_dnode()
 * that takes and drops the dnode hold.  On success *numbufsp/*dbpp are
 * filled in and the caller must release with dmu_buf_rele_array(..., tag).
 * Returns 0 or an errno-style error from dnode_hold()/the by-dnode call.
 */
static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

/*
 * Copy `size' bytes from the VM pages in ma[] into the object at
 * [offset, offset + size) within transaction `tx'.  The pages are
 * assumed to be page-aligned with the object offset (asserted below
 * via ptoa((*ma)->pindex)).  Returns 0 or an errno-style error from
 * dmu_buf_hold_array().
 */
int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    vm_page_t *ma, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	struct sf_buf *sf;
	int numbufs, i;
	int err;

	if (size == 0)
		return (0);

	err = dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy, copied, thiscpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];
		caddr_t va;

		ASSERT(size > 0);
		ASSERT3U(db->db_size, >=, PAGESIZE);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		/* Only the first and last buffers may be partially written. */
		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		/*
		 * A whole-buffer write need not read-in the old contents
		 * (will_fill); a partial write must (will_dirty).
		 */
		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * Copy page by page, temporarily mapping each page with
		 * an sf_buf.  `ma' is advanced as pages are consumed.
		 */
		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
			ASSERT3U(ptoa((*ma)->pindex), ==,
			    db->db_offset + bufoff);
			thiscpy = MIN(PAGESIZE, tocpy - copied);
			va = zfs_map_page(*ma, &sf);
			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
			zfs_unmap_page(sf);
			ma += 1;
			bufoff += PAGESIZE;
		}

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

/*
 * Fill the `count' pages in ma[] (which must be consecutive in their VM
 * object, asserted below) with data from `object'.  last_size is the
 * number of valid bytes in the final page (<= PAGE_SIZE).  In addition,
 * opportunistically populate up to *rbehind pages before ma[0] and up to
 * *rahead pages after ma[count - 1]; on return those counts are updated
 * to the number of pages actually filled.  Returns 0 or an errno-style
 * error from dmu_buf_hold_array().
 */
int
dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead, int last_size)
{
	struct sf_buf *sf;
	vm_object_t vmobj;
/* (continuation of dmu_read_pages() locals) */
	vm_page_t m;
	dmu_buf_t **dbp;
	dmu_buf_t *db;
	caddr_t va;
	int numbufs, i;
	int bufoff, pgoff, tocpy;
	int mi, di;
	int err;

	ASSERT3U(ma[0]->pindex + count - 1, ==, ma[count - 1]->pindex);
	ASSERT(last_size <= PAGE_SIZE);

	err = dmu_buf_hold_array(os, object, IDX_TO_OFF(ma[0]->pindex),
	    IDX_TO_OFF(count - 1) + last_size, TRUE, FTAG, &numbufs, &dbp);
	if (err != 0)
		return (err);

#ifdef ZFS_DEBUG
	IMPLY(last_size < PAGE_SIZE, *rahead == 0);
	/*
	 * Unless the request fits in a single (possibly short) first
	 * buffer, all buffers must be equally sized powers of two and
	 * aligned to their size.
	 */
	if (dbp[0]->db_offset != 0 || numbufs > 1) {
		for (i = 0; i < numbufs; i++) {
			ASSERT(ISP2(dbp[i]->db_size));
			ASSERT((dbp[i]->db_offset % dbp[i]->db_size) == 0);
			ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size);
		}
	}
#endif

	vmobj = ma[0]->object;
	zfs_vmobject_wlock_12(vmobj);

	/*
	 * Read-behind: walk backwards from ma[0], grabbing pages with
	 * VM_ALLOC_NOWAIT and filling each from the first buffer.  Stop
	 * at the first page we cannot grab or that is already valid.
	 */
	db = dbp[0];
	for (i = 0; i < *rbehind; i++) {
		m = vm_page_grab_unlocked(vmobj, ma[0]->pindex - 1 - i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_BUSY_FLAGS);
		if (m == NULL)
			break;
		if (!vm_page_none_valid(m)) {
			ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
			vm_page_do_sunbusy(m);
			break;
		}
		ASSERT(m->dirty == 0);
		ASSERT(!pmap_page_is_write_mapped(m));

		/*
		 * NOTE(review): this insists the buffer is strictly larger
		 * than a page; a db_size == PAGE_SIZE buffer would trip it.
		 * Presumably read-behind is only requested when blocks span
		 * multiple pages — confirm against the pager caller.
		 */
		ASSERT(db->db_size > PAGE_SIZE);
		bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
		va = zfs_map_page(m, &sf);
		bcopy((char *)db->db_data + bufoff, va, PAGESIZE);
		zfs_unmap_page(sf);
		vm_page_valid(m);
		dmu_page_lock(m);
		if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
			vm_page_activate(m);
		else
			vm_page_deactivate(m);
		dmu_page_unlock(m);
		vm_page_do_sunbusy(m);
	}
	*rbehind = i;

	/*
	 * Main loop: copy buffer contents into the requested pages.
	 * `mi'/`pgoff' track the current page and offset within it,
	 * `di'/`bufoff' the current buffer and offset within it; page
	 * and buffer boundaries need not coincide.  A bogus_page entry
	 * is skipped (not mapped or copied into) but still advances.
	 */
	bufoff = IDX_TO_OFF(ma[0]->pindex) % db->db_size;
	pgoff = 0;
	for (mi = 0, di = 0; mi < count && di < numbufs; ) {
		if (pgoff == 0) {
			m = ma[mi];
			if (m != bogus_page) {
				vm_page_assert_xbusied(m);
				ASSERT(vm_page_none_valid(m));
				ASSERT(m->dirty == 0);
				ASSERT(!pmap_page_is_write_mapped(m));
				va = zfs_map_page(m, &sf);
			}
		}
		if (bufoff == 0)
			db = dbp[di];

		if (m != bogus_page) {
			ASSERT3U(IDX_TO_OFF(m->pindex) + pgoff, ==,
			    db->db_offset + bufoff);
		}

		/*
		 * We do not need to clamp the copy size by the file
		 * size as the last block is zero-filled beyond the
		 * end of file anyway.
		 */
		tocpy = MIN(db->db_size - bufoff, PAGESIZE - pgoff);
		if (m != bogus_page)
			bcopy((char *)db->db_data + bufoff, va + pgoff, tocpy);

		pgoff += tocpy;
		ASSERT(pgoff <= PAGESIZE);
		if (pgoff == PAGESIZE) {
			/* Page completely filled; move to the next one. */
			if (m != bogus_page) {
				zfs_unmap_page(sf);
				vm_page_valid(m);
			}
			ASSERT(mi < count);
			mi++;
			pgoff = 0;
		}

		bufoff += tocpy;
		ASSERT(bufoff <= db->db_size);
		if (bufoff == db->db_size) {
			/* Buffer exhausted; move to the next one. */
			ASSERT(di < numbufs);
			di++;
			bufoff = 0;
		}
	}

#ifdef ZFS_DEBUG
	/*
	 * Three possibilities:
	 * - last requested page ends at a buffer boundary and, thus,
	 *   all pages and buffers have been iterated;
	 * - all requested pages are filled, but the last buffer
	 *   has not been exhausted;
	 *   the read-ahead is possible only in this case;
	 * - all buffers have been read, but the last page has not been
	 *   fully filled;
	 *   this is only possible if the file has only a single buffer
	 *   with a size that is not a multiple of the page size.
	 */
	if (mi == count) {
		ASSERT(di >= numbufs - 1);
		IMPLY(*rahead != 0, di == numbufs - 1);
		IMPLY(*rahead != 0, bufoff != 0);
		ASSERT(pgoff == 0);
	}
	if (di == numbufs) {
		ASSERT(mi >= count - 1);
		ASSERT(*rahead == 0);
		IMPLY(pgoff == 0, mi == count);
		if (pgoff != 0) {
			ASSERT(mi == count - 1);
			ASSERT((dbp[0]->db_size & PAGE_MASK) != 0);
		}
	}
#endif
	/*
	 * Zero-fill the tail of a partially filled last page (the third
	 * case above); `va'/`sf'/`m' still refer to that mapped page.
	 */
	if (pgoff != 0) {
		ASSERT(m != bogus_page);
		bzero(va + pgoff, PAGESIZE - pgoff);
		zfs_unmap_page(sf);
		vm_page_valid(m);
	}

	/*
	 * Read-ahead: walk forwards from ma[count - 1], same protocol as
	 * the read-behind loop, sourcing from the last buffer (`db' is
	 * left pointing at it by the main loop).  The final page of the
	 * buffer may be short and is zero-padded.
	 */
	for (i = 0; i < *rahead; i++) {
		m = vm_page_grab_unlocked(vmobj, ma[count - 1]->pindex + 1 + i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_BUSY_FLAGS);
		if (m == NULL)
			break;
		if (!vm_page_none_valid(m)) {
			ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
			vm_page_do_sunbusy(m);
			break;
		}
		ASSERT(m->dirty == 0);
		ASSERT(!pmap_page_is_mapped(m));

		/* See the NOTE(review) on the read-behind assertion. */
		ASSERT(db->db_size > PAGE_SIZE);
		bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
		tocpy = MIN(db->db_size - bufoff, PAGESIZE);
		va = zfs_map_page(m, &sf);
		bcopy((char *)db->db_data + bufoff, va, tocpy);
		if (tocpy < PAGESIZE) {
			ASSERT(i == *rahead - 1);
			ASSERT((db->db_size & PAGE_MASK) != 0);
			bzero(va + tocpy, PAGESIZE - tocpy);
		}
		zfs_unmap_page(sf);
		vm_page_valid(m);
		dmu_page_lock(m);
		if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
			vm_page_activate(m);
		else
			vm_page_deactivate(m);
		dmu_page_unlock(m);
		vm_page_do_sunbusy(m);
	}
	*rahead = i;
	zfs_vmobject_wunlock_12(vmobj);

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (0);
}