#!/bin/ksh

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
# Copyright (c) 2018 Datto Inc.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/rsend/rsend.kshlib

#
# Description:
# Verify incremental receive properly handles objects with changed
# dnode slot count.
#
# Strategy:
# 1. Populate a dataset with 1k byte dnodes and snapshot
# 2. Remove objects, set dnodesize=legacy, and remount dataset so new objects
#    get recycled numbers and formerly "interior" dnode slots get assigned
#    to new objects
# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects
#    overlap with recently recycled slots and formerly "normal" dnode slots
#    get assigned to new objects
# 4. Create an empty file and add xattrs to it to exercise reclaiming a
#    dnode that requires more than 1 slot for its bonus buffer (ZoL #7433)
# 5. Generate initial and incremental streams
# 6. Verify initial and incremental streams can be received
#

verify_runnable "both"

log_assert "Verify incremental receive handles objects with changed dnode size"

function cleanup
{
	rm -f $BACKDIR/fs-dn-legacy
	rm -f $BACKDIR/fs-dn-1k
	rm -f $BACKDIR/fs-dn-2k
	rm -f $BACKDIR/fs-attr

	datasetexists $POOL/fs && destroy_dataset $POOL/fs -rR
	datasetexists $POOL/newfs && destroy_dataset $POOL/newfs -rR
}

log_onexit cleanup

# 1. Populate a dataset with 1k byte dnodes and snapshot
log_must zfs create -o dnodesize=1k $POOL/fs
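# Fill the dataset with 200 test files so a large number of 1k dnodes is
# allocated before the first snapshot is taken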
log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@a

# 2. Remove objects, set dnodesize=legacy, and remount dataset so new objects
#    get recycled numbers and formerly "interior" dnode slots get assigned
#    to new objects
rm /$POOL/fs/*

log_must zfs unmount $POOL/fs
log_must zfs set dnodesize=legacy $POOL/fs
log_must zfs mount $POOL/fs

log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@b

# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects
#    overlap with recently recycled slots and formerly "normal" dnode slots
#    get assigned to new objects
rm /$POOL/fs/*

log_must zfs unmount $POOL/fs
log_must zfs set dnodesize=2k $POOL/fs
log_must zfs mount $POOL/fs

log_must touch /$POOL/fs/attrs
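# The empty 'attrs' file created above is reused in step 4, where a large
# SA xattr grows its bonus buffer beyond a single dnode slot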
log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@c

# 4. Create an empty file and add xattrs to it to exercise reclaiming a
#    dnode that requires more than 1 slot for its bonus buffer (ZoL #7433)
log_must zfs set compression=on xattr=sa $POOL/fs
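# With xattr=sa the 512-byte value below is stored in the dnode's bonus
# area, so the dnode backing 'attrs' now needs more than one slot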
91log_must eval "python3 -c 'print \"a\" * 512' |
92    set_xattr_stdin bigval /$POOL/fs/attrs"
93log_must zfs snapshot $POOL/fs@d
94
95# 5. Generate initial and incremental streams
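# fs-dn-1k is a full stream of @a; each incremental crosses a dnodesize
# change (1k -> legacy -> 2k) and the final one carries only the xattr update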
96log_must eval "zfs send $POOL/fs@a > $BACKDIR/fs-dn-1k"
97log_must eval "zfs send -i $POOL/fs@a $POOL/fs@b > $BACKDIR/fs-dn-legacy"
98log_must eval "zfs send -i $POOL/fs@b $POOL/fs@c > $BACKDIR/fs-dn-2k"
99log_must eval "zfs send -i $POOL/fs@c $POOL/fs@d > $BACKDIR/fs-attr"
100
101# 6. Verify initial and incremental streams can be received
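# Receive the full stream and then each incremental in order into a new
# dataset; mishandling of reallocated dnode slots should cause a receive to fail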
102log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-1k"
103log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-legacy"
104log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-2k"
105log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-attr"
106
107log_pass "Verify incremental receive handles objects with changed dnode size"
108