#!/usr/bin/env python
#
# Public Domain 2014-2018 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.

import fnmatch, os, shutil, sys
from suite_subprocess import suite_subprocess
import wiredtiger, wttest
from wtscenario import make_scenarios

# test_schema08.py
#    Test schema operations on recovery.
# Test the schema operations alter, create, drop and rename.
# After performing the operation, walk the log recording each LSN,
# make a backup copy of the directory for each LSN, truncate the log
# in that copy at its LSN, and then run recovery on every copy.
class test_schema08(wttest.WiredTigerTestCase, suite_subprocess):
    # We want to copy, truncate and run recovery so keep the log
    # file small and don't pre-allocate any. We expect a small log.
    conn_config = 'log=(enabled,archive=false,file_max=100k,prealloc=false)'
    types = [
        ('file', dict(uri='file:', use_cg=False, use_index=False)),
        ('lsm', dict(uri='lsm:', use_cg=False, use_index=False)),
        ('table-cg', dict(uri='table:', use_cg=True, use_index=False)),
        ('table-index', dict(uri='table:', use_cg=False, use_index=True)),
        ('table-simple', dict(uri='table:', use_cg=False, use_index=False)),
    ]
    ops = [
        ('none', dict(schema_ops='none')),
        ('alter', dict(schema_ops='alter')),
        ('drop', dict(schema_ops='drop')),
        ('rename', dict(schema_ops='rename')),
    ]
    ckpt = [
        ('no_ckpt', dict(ckpt=False)),
        ('with_ckpt', dict(ckpt=True)),
    ]
    scenarios = make_scenarios(types, ops, ckpt)
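    # Run-time state: the number of whole log records found, the LSN
    # offsets of those records, and the prefix for the per-LSN backup
    # directories.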
    count = 0
    lsns = []
    backup_pfx = "BACKUP."

    def do_alter(self, uri, suburi):
        alter_param = 'cache_resident=true'
        self.session.alter(uri, alter_param)
        if suburi is not None:
            self.session.alter(suburi, alter_param)

    def do_ops(self, uri, suburi):
        if self.schema_ops == 'none':
            return
        if self.schema_ops == 'alter':
            self.do_alter(uri, suburi)
        elif self.schema_ops == 'drop':
            self.session.drop(uri, None)
        elif self.schema_ops == 'rename':
            newuri = self.uri + "new-table"
            self.session.rename(uri, newuri, None)

    # Count the actual log records in the log. Log cursors walk the individual
    # operations of a transaction as well as the entire record. Skip counting
    # the individual commit operations and only count entire records.
    def find_logrecs(self):
        self.count = 0
        self.session.log_flush('sync=on')
        c = self.session.open_cursor('log:', None, None)
        self.lsns.append(0)
        while c.next() == 0:
            # The log cursor key is (lsn.file, lsn.offset, opcount).
            keys = c.get_key()
            # We don't expect to need more than one log file. We only store
            # the offsets in a list, so assert lsn.file is 1.
            self.assertEqual(keys[0], 1)

            # Only count whole records, which is when opcount is zero.
            # If opcount is not zero it is an operation within a commit.
            # Skip LSN 128; that is a system record whose existence is
            # assumed by the system.
            if keys[2] == 0 and keys[1] != 128:
                self.count += 1
                self.lsns.append(keys[1])
        c.close()
        self.pr("Found " + str(self.count) + " log records, LSNs: ")
        self.pr(str(self.lsns))

    def make_backups(self):
        # With the connection still open, copy files to the new directory.
        # Make an initial copy as well as a copy for each LSN we save.
        # Truncate the log to the appropriate offset as we make each copy.
        olddir = "."
        log1 = 'WiredTigerLog.0000000001'
        for lsn in self.lsns:
            newdir = self.backup_pfx + str(lsn)
            shutil.rmtree(newdir, ignore_errors=True)
            os.mkdir(newdir)
            for fname in os.listdir(olddir):
                fullname = os.path.join(olddir, fname)
                # Skip the lock file, since it is locked on Windows, and
                # skip temporary and pre-allocated log files.
                if os.path.isfile(fullname) and \
                    "WiredTiger.lock" not in fullname and \
                    "Tmplog" not in fullname and \
                    "Preplog" not in fullname:
                    shutil.copy(fullname, newdir)
            # Truncate the file to the LSN offset.
            # NOTE: This removes the record at that offset,
            # resulting in recovery running to just before
            # that record.
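            # LSN 0 is the baseline copy, left with the full, untruncated log.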
            if lsn != 0:
                logf = os.path.join(newdir, log1)
                f = open(logf, "r+")
                f.truncate(lsn)
                f.close()
                # print "New size " + logf + ": " + str(os.path.getsize(logf))

    def run_recovery(self, uri, suburi):
        # Run recovery on each backup directory by invoking the 'wt' utility
        # with recovery enabled ('-R') and listing the tables, writing the
        # output to a per-directory text file.
        for lsn in self.lsns:
            newdir = self.backup_pfx + str(lsn)
            outfile = newdir + '.txt'
            self.runWt(['-R', '-h', newdir, 'list', '-v'], outfilename=outfile)

    # Test that creating and dropping tables does not write individual
    # log records.
    def test_schema08_create(self):
        self.count = 0
        self.lsns = []
        uri = self.uri + 'table0'
        create_params = 'key_format=i,value_format=S,'

        cgparam = ''
        suburi = None
        if self.use_cg or self.use_index:
            cgparam = 'columns=(k,v),'
        if self.use_cg:
            cgparam += 'colgroups=(g0),'

        # Create main table.
        self.session.create(uri, create_params + cgparam)

        # Checkpoint after the main table creation if wanted.
        if self.ckpt:
            self.session.checkpoint()

        # Add in column group or index tables.
        if self.use_cg:
            # Create.
            cgparam = 'columns=(v),'
            suburi = 'colgroup:table0:g0'
            self.session.create(suburi, cgparam)

        if self.use_index:
            # Create.
            suburi = 'index:table0:i0'
            self.session.create(suburi, cgparam)

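        # Perform the schema operation under test, record the resulting log
        # LSNs, make a truncated backup copy at each LSN and run recovery on
        # every copy.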
        self.do_ops(uri, suburi)
        self.find_logrecs()
        # print "Found " + str(self.count) + " log records"
        self.make_backups()
        self.run_recovery(uri, suburi)

if __name__ == '__main__':
    wttest.run()