#!/usr/bin/env python
#
# Public Domain 2014-2018 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#

# A workload with a small cache, small internal and leaf page sizes, faster
# splits and multiple threads inserting keys in random order. It stresses
# page splits in order to catch split races.
#
from runner import *
from wiredtiger import *
from workgen import *

context = Context()
# Connection configuration.
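# A deliberately small 100MB cache keeps pages under memory pressure; logging
# is disabled, and fast statistics are logged once per second.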
conn_config = "cache_size=100MB,log=(enabled=false),statistics=[fast],statistics_log=(wait=1,json=false)"
conn = wiredtiger_open("WT_TEST", "create," + conn_config)
s = conn.open_session("")

# Table configuration.
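# Small 8KB leaf and internal pages, a 1MB in-memory page limit and a low
# split_deepen_min_child value encourage frequent, early page splits.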
table_config = "leaf_page_max=8k,internal_page_max=8k,leaf_item_max=1433,internal_item_max=3100,type=file,memory_page_max=1MB,split_deepen_min_child=100"
tables = []
table_count = 3
for i in range(0, table_count):
    tname = "file:test" + str(i)
    table = Table(tname)
    s.create(tname, 'key_format=S,value_format=S,' + table_config)
    table.options.key_size = 64
    table.options.value_size = 200
    table.options.range = 100000000 # 100 million
    tables.append(table)

# Populate phase.
populate_threads = 1
icount = 50000
# There are multiple tables to be filled during populate;
# the icount is split between them all.
pop_ops = Operation(Operation.OP_INSERT, tables[0])
pop_ops = op_multi_table(pop_ops, tables)
# Use integer division so the per-thread operation count is a whole number.
nops_per_thread = icount // (populate_threads * table_count)
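# Repeating the multi-table insert group nops_per_thread times gives each
# populate thread its share; together the threads insert roughly icount keys.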
pop_thread = Thread(pop_ops * nops_per_thread)
pop_workload = Workload(context, populate_threads * pop_thread)
print('populate:')
pop_workload.run(conn)

# Run phase.
ops = Operation(Operation.OP_INSERT, tables[0])
ops = op_multi_table(ops, tables, False)
thread0 = Thread(ops)

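# 20 identical threads insert keys in random order across the tables for
# 300 seconds, reporting progress every 5 seconds.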
workload = Workload(context, 20 * thread0)
workload.options.report_interval=5
workload.options.run_time=300
print('Split stress workload running...')
workload.run(conn)

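# Dump the latency statistics gathered during the run to a file under the
# test directory.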
latency_filename = "WT_TEST/latency.out"
latency.workload_latency(workload, latency_filename)
conn.close()