1--source include/have_rocksdb.inc
2
3--echo #---------------------------
4--echo # ten threads inserting simultaneously with increment > 1
5--echo # Issue #390
6--echo #---------------------------
7
8# Run 10 simulatenous threads each inserting 10,000 rows
9let $num_threads = 10;
10let $num_rows_per_thread = 100000;
11
12# Create the table with an AUTO_INCREMENT primary key and a separate colum
13# to store which thread created the row
14CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, thr INT) ENGINE=rocksdb;
15
16# For each thread...
17# 1) set up a connection
18# 2) create a file that can be used for LOAD DATA INFILE ...
19let $i = `SELECT $num_threads`;
20while ($i > 0)
21{
22  dec $i;
23
24  # Set up connection
25  connect (con$i, localhost, root,,);
26
27  # Set up the auto_increment_* variables for each thread
28  eval SET auto_increment_increment = 100;
29  eval SET auto_increment_offset = $i + 1;
30  let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`;
31
32  # Pass variables into perl
33  let ROCKSDB_INFILE = $file;
34  let ROCKSDB_THREAD = `SELECT $i`;
35  let ROCKSDB_ROWS_PER_THREAD = `SELECT $num_rows_per_thread`;
36
37  # Create a file to load
38  perl;
39  my $fn = $ENV{'ROCKSDB_INFILE'};
40  my $thr = $ENV{'ROCKSDB_THREAD'};
41  my $num = $ENV{'ROCKSDB_ROWS_PER_THREAD'};
42  open(my $fh, '>>', $fn) || die "perl open($fn): $!";
43  binmode $fh;
44  for (my $ii = 0; $ii < $num; $ii++)
45  {
46    print $fh "\\N\t$thr\n"
47  }
48  close($fh);
49  EOF
50}
51
52# For each connection start the LOAD DATA INFILE in the background
53connection default;
54let $i = `SELECT $num_threads`;
55while ($i > 0)
56{
57  dec $i;
58
59  connection con$i;
60  let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`;
61  --disable_query_log
62  --echo LOAD DATA INFILE <input_file> INTO TABLE t1;
63  send_eval LOAD DATA INFILE '$file' INTO TABLE t1;
64  --enable_query_log
65}
66
67# Reap each connection's background result
68connection default;
69let $i = `SELECT $num_threads`;
70while ($i > 0)
71{
72  dec $i;
73
74  connection con$i;
75  reap;
76}
77
78# Make sure we have the required number of rows
79connection default;
80SELECT COUNT(*) FROM t1;
81SELECT thr, COUNT(pk) FROM t1 GROUP BY thr;
82
83# Cleanup the connection and file used for LOAD DATA INFILE
84let $i = `SELECT $num_threads`;
85while ($i > 0)
86{
87  dec $i;
88
89  disconnect con$i;
90  let $file = `SELECT CONCAT(@@datadir, "test_insert_", "$i", ".txt")`;
91  remove_file $file;
92}
93
94# Validate each row.  For each row, the created 'thr' column shows which
95# thread created the row.  The pk that was automatically generated should
96# therefore match a certain pattern.  For thread 0, the pk should be in
97# the sequence [1, 101, 201, 301, ...]; for thread 1, it should be in the
98# sequence [2, 102, 202, 302, ...], etc.  The pk for each row should be
99# smallest value in the sequence for thread 'thr' that is greater than
100# the pk in the previous row.
101let $file = `SELECT CONCAT(@@datadir, "test_export.txt")`;
102--disable_query_log
103--echo SELECT * FROM t1 ORDER BY pk INTO OUTFILE <output_file>;
104eval SELECT * FROM t1 ORDER BY pk INTO OUTFILE "$file";
105--enable_query_log
106
107let ROCKSDB_OUTFILE = $file;
108
109perl;
110my $fn = $ENV{'ROCKSDB_OUTFILE'};
111my $last_pk = 0;
112open(my $fh, '<', $fn) || die "perl open($fn): $!";
113while (<$fh>)
114{
115  if ($_ =~ m/^(.*)\t(.*)$/)
116  {
117    my $pk = $1;
118    my $thr = $2;
119
120    my $expected_pk = int($last_pk / 100) * 100 + ($thr + 1);
121    $expected_pk += 100 if $expected_pk <= $last_pk;
122
123    if ($expected_pk != $pk)
124    {
125      die "Incorrect next pk ($pk); expected $expected_pk (previous: $last_pk)"
126    }
127
128    $last_pk = $pk;
129  }
130  else
131  {
132    die "output file has incorrect format: $_";
133  }
134}
135print stdout "All pk values matched their expected values\n";
136EOF
137
138remove_file $file;
139
140# Drop the table to finally clean up
141DROP TABLE t1;
142
143