1 /* Copyright (c) 2008, 2021, Oracle and/or its affiliates.
2 
3    This program is free software; you can redistribute it and/or modify
4    it under the terms of the GNU General Public License, version 2.0,
5    as published by the Free Software Foundation.
6 
7    This program is also distributed with certain software (including
8    but not limited to OpenSSL) that is licensed under separate terms,
9    as designated in a particular file or component or in included license
10    documentation.  The authors of MySQL hereby grant you an additional
11    permission to link the program and your derivative works with the
12    separately licensed software that they have included with MySQL.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License, version 2.0, for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
22 
23 #include <NDBT.hpp>
24 #include <NDBT_Test.hpp>
25 #include <NdbRestarter.hpp>
26 
27 #define CHECKNOTNULL(p) if ((p) == NULL) {          \
28     ndbout << "Error at line " << __LINE__ << endl; \
29     NDB_ERR(trans->getNdbError());                  \
30     trans->close();                                 \
31     return NDBT_FAILED; }
32 
33 #define CHECKEQUAL(v, e) if ((e) != (v)) {            \
34     ndbout << "Error at line " << __LINE__ <<         \
35       " expected " << v << endl;                      \
36     NDB_ERR(trans->getNdbError());                    \
37     trans->close();                                   \
38     return NDBT_FAILED; }
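/* Note : both macros assume a live NdbTransaction* named 'trans' in the
 * enclosing scope - on failure they log trans->getNdbError(), close the
 * transaction and fail the test step.
 */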
39 
40 
41 /* Setup memory as a long Varchar with 2 bytes of
42  * length information
43  */
44 Uint32 setLongVarchar(char* where, const char* what, Uint32 sz)
45 {
46   where[0]=sz & 0xff;
47   where[1]=(sz >> 8) & 0xff;
48   memcpy(&where[2], what, sz);
49   return (sz + 2);
50 }
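
/* Illustrative sketch (guarded out, not part of any test) of the
 * little-endian 2-byte length prefix written above.  The buffer and value
 * sizes used here are arbitrary examples.
 */
#if 0
static void exampleSetLongVarchar()
{
  char buff[300 + 2];
  char data[300];
  memset(data, 'A', sizeof(data));

  Uint32 total= setLongVarchar(buff, data, 300);

  /* total == 302, buff[0] == 0x2c (300 & 0xff), buff[1] == 0x01 (300 >> 8) */
  (void) total;
}
#endif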
51 
52 
53 /* Activate the given error insert in TC block
54  * This is used for error insertion where a TCKEYREQ
55  * is required to activate the error
56  */
57 int activateErrorInsert(NdbTransaction* trans,
58                         const NdbRecord* record,
59                         const NdbDictionary::Table* tab,
60                         const char* buf,
61                         NdbRestarter* restarter,
62                         Uint32 val)
63 {
64   /* We insert the error twice to avoid what appear to be
65    * races between the error insert and the subsequent
66    * tests.
67    * Alternatively we could sleep here.
68    */
69   if (restarter->insertErrorInAllNodes(val) != 0){
70     g_err << "error insert 1 (" << val << ") failed" << endl;
71     return NDBT_FAILED;
72   }
73   if (restarter->insertErrorInAllNodes(val) != 0){
74     g_err << "error insert 2 (" << val << ") failed" << endl;
75     return NDBT_FAILED;
76   }
77 
78   NdbOperation* insert= trans->getNdbOperation(tab);
79 
80   CHECKNOTNULL(insert);
81 
82   CHECKEQUAL(0, insert->insertTuple());
83 
84   CHECKEQUAL(0, insert->equal((Uint32) 0,
85                               NdbDictionary::getValuePtr
86                               (record,
87                                buf,
88                                0)));
89   CHECKEQUAL(0, insert->setValue(1,
90                                  NdbDictionary::getValuePtr
91                                  (record,
92                                   buf,
93                                   1)));
94 
95   CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
96 
97   CHECKEQUAL(0, trans->getNdbError().code);
98 
99   return NDBT_OK;
100 }
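
/* Hedged sketch (guarded out, illustration only) of the error-insert
 * lifecycle the tests below all follow : arm a limiting error insert
 * (e.g. 8065/8066) on one TC, provoke a request that exceeds the limit,
 * then arm 8068 to release the hoarded segments again.
 */
#if 0
static int errorInsertLifecycleSketch(Ndb* pNdb, NDBT_Context* ctx,
                                      NdbRestarter* restarter,
                                      const NdbRecord* record,
                                      const char* rowBuf,
                                      const char* hintKey, Uint32 hintKeyLen)
{
  /* Start a transaction hinted to a particular TC and arm the limit */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                hintKey, hintKeyLen);
  CHECKNOTNULL(trans);
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, record, ctx->getTab(),
                                          rowBuf, restarter, 8065));

  /* ... provoke oversized requests and expect error 218 / 217 ... */

  /* Finally release the hoarded segments and roll back the cleanup trans */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, record, ctx->getTab(),
                                          rowBuf, restarter, 8068));
  trans->execute(NdbTransaction::Rollback);
  trans->close();
  return NDBT_OK;
}
#endif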
101 
102 /* Test for correct behaviour using primary key operations
103  * when an NDBD node's SegmentedSection pool is exhausted.
104  */
105 int testSegmentedSectionPk(NDBT_Context* ctx, NDBT_Step* step){
106   /*
107    * Signal type       Exhausted @              How
108    * -----------------------------------------------------
109    * Long TCKEYREQ     Initial import           Consume + send
110    * Long TCKEYREQ     Initial import, not first
111    *                     TCKEYREQ in batch      Consume + send
112    * Long TCKEYREQ     Initial import, not last
113    *                     TCKEYREQ in batch      Consume + send
114    * No testing of short TCKEYREQ variants as they cannot be
115    * generated in mysql-5.1-telco-6.4+
116    * TODO : Add short variant testing to testUpgrade.
117    */
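  /* In each of these cases the data node should reject the request with
   * error 218 (ZGET_DATABUF_ERR), reported on the API transaction
   */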
118 
119   /* We just run on one table */
120   if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
121     return NDBT_OK;
122 
123   const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
124   const Uint32 maxKeyBytes= NDBT_Tables::MaxVarTypeKeyBytes;
125   const Uint32 maxAttrBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytes;
126   const Uint32 srcBuffBytes= MAX(maxKeyBytes,maxAttrBytes);
127   char smallKey[50];
128   char srcBuff[srcBuffBytes];
129   char smallRowBuf[maxRowBytes];
130   char bigKeyRowBuf[maxRowBytes];
131   char bigAttrRowBuf[maxRowBytes];
132 
133   /* Small key for hinting to same TC */
134   Uint32 smallKeySize= setLongVarchar(&smallKey[0],
135                                       "ShortKey",
136                                       8);
137 
138   /* Large value source */
139   memset(srcBuff, 'B', srcBuffBytes);
140 
141   const NdbRecord* record= ctx->getTab()->getDefaultRecord();
142 
143   /* Setup buffers
144    * Small row buffer with small key and small data
145    */
146   setLongVarchar(NdbDictionary::getValuePtr(record,
147                                             smallRowBuf,
148                                             0),
149                  "ShortKey",
150                  8);
151   NdbDictionary::setNull(record, smallRowBuf, 0, false);
152 
153   setLongVarchar(NdbDictionary::getValuePtr(record,
154                                             smallRowBuf,
155                                             1),
156                  "ShortData",
157                  9);
158   NdbDictionary::setNull(record, smallRowBuf, 1, false);
159 
160   /* Big key buffer with big key and small data*/
161   setLongVarchar(NdbDictionary::getValuePtr(record,
162                                             bigKeyRowBuf,
163                                             0),
164                  &srcBuff[0],
165                  maxKeyBytes);
166   NdbDictionary::setNull(record, bigKeyRowBuf, 0, false);
167 
168   setLongVarchar(NdbDictionary::getValuePtr(record,
169                                             bigKeyRowBuf,
170                                             1),
171                  "ShortData",
172                  9);
173   NdbDictionary::setNull(record, bigKeyRowBuf, 1, false);
174 
175   /* Big AttrInfo buffer with small key and big data */
176   setLongVarchar(NdbDictionary::getValuePtr(record,
177                                             bigAttrRowBuf,
178                                             0),
179                  "ShortKey",
180                  8);
181   NdbDictionary::setNull(record, bigAttrRowBuf, 0, false);
182 
183   setLongVarchar(NdbDictionary::getValuePtr(record,
184                                             bigAttrRowBuf,
185                                             1),
186                  &srcBuff[0],
187                  maxAttrBytes);
188   NdbDictionary::setNull(record, bigAttrRowBuf, 1, false);
189 
190   NdbRestarter restarter;
191   Ndb* pNdb= GETNDB(step);
192 
193   /* Start a transaction on a specific node */
194   NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
195                                                 &smallKey[0],
196                                                 smallKeySize);
197   CHECKNOTNULL(trans);
198 
199   /* Activate error insert 8065 in this transaction; it limits
200    * any single import/append to 1 section.
201    */
202   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
203                                           record,
204                                           ctx->getTab(),
205                                           smallRowBuf,
206                                           &restarter,
207                                           8065));
208 
209   /* Ok, let's try an insert with a key bigger than 1 section.
210    * Since it's part of the same transaction, it'll go via
211    * the same TC.
212    */
213   const NdbOperation* bigInsert = trans->insertTuple(record, bigKeyRowBuf);
214 
215   CHECKNOTNULL(bigInsert);
216 
217   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
218 
219   /* ZGET_DATABUF_ERR expected */
220   CHECKEQUAL(218, trans->getNdbError().code);
221 
222   trans->close();
223 
224   /* Ok, now a long TCKEYREQ to the same TC - this
225    * has slightly different abort handling since no other
226    * operations exist in this new transaction.
227    * We also change it so that import overflow occurs
228    * on the AttrInfo section
229    */
230   /* Start transaction on the same node */
231   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
232                                              &smallKey[0],
233                                              smallKeySize));
234 
235 
236   CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));
237 
238   CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));
239 
240   /* ZGET_DATABUF_ERR expected */
241   CHECKEQUAL(218, trans->getNdbError().code);
242 
243   trans->close();
244 
245   /* Ok, now a long TCKEYREQ where we run out of SegmentedSections
246    * on the first TCKEYREQ, but there are other TCKEYREQs following
247    * in the same batch.  Check that abort handling is correct
248    */
249   /* Start transaction on the same node */
250   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
251                                              &smallKey[0],
252                                              smallKeySize));
253   /* First op in batch, will cause overflow */
254   CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));
255 
256   /* Second op in batch, what happens to it? */
257   const NdbOperation* secondOp;
258   CHECKNOTNULL(secondOp = trans->insertTuple(record, bigAttrRowBuf));
259 
260 
261   CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));
262 
263   /* ZGET_DATABUF_ERR expected */
264   CHECKEQUAL(218, trans->getNdbError().code);
265 
266   trans->close();
267 
268   /* Now try with a 'short' TCKEYREQ, generated using the old Api
269    * with a big key value
270    */
271   /* Start transaction on the same node */
272   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
273                                              &smallKey[0],
274                                              smallKeySize));
275 
276   NdbOperation* bigInsertOldApi;
277   CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
278 
279   CHECKEQUAL(0, bigInsertOldApi->insertTuple());
280   CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
281                                        NdbDictionary::getValuePtr
282                                        (record,
283                                         bigKeyRowBuf,
284                                         0)));
285   CHECKEQUAL(0, bigInsertOldApi->setValue(1,
286                                           NdbDictionary::getValuePtr
287                                           (record,
288                                            bigKeyRowBuf,
289                                            1)));
290 
291   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
292 
293   /* ZGET_DATABUF_ERR expected */
294   CHECKEQUAL(218, trans->getNdbError().code);
295 
296   trans->close();
297 
298   /* Now try with a 'short' TCKEYREQ, generated using the old Api
299    * with a big data value
300    */
301   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
302                                              &smallKey[0],
303                                              smallKeySize));
304 
305   CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
306 
307   CHECKEQUAL(0, bigInsertOldApi->insertTuple());
308   CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
309                                        NdbDictionary::getValuePtr
310                                        (record,
311                                         bigAttrRowBuf,
312                                         0)));
313   CHECKEQUAL(0, bigInsertOldApi->setValue(1,
314                                           NdbDictionary::getValuePtr
315                                           (record,
316                                            bigAttrRowBuf,
317                                            1)));
318 
319   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
320 
321   /* ZGET_DATABUF_ERR expected */
322   CHECKEQUAL(218, trans->getNdbError().code);
323 
324   trans->close();
325 
326   // TODO : Add code to testUpgrade
327 #if 0
328   /*
329    * Short TCKEYREQ    KeyInfo accumulate       Consume + send long
330    *                     (TCKEYREQ + KEYINFO)
331    * Short TCKEYREQ    AttrInfo accumulate      Consume + send short key
332    *                                             + long AI
333    *                      (TCKEYREQ + ATTRINFO)
334    */
335   /* Change error insert so that next TCKEYREQ will grab
336    * all but one SegmentedSection so that we can then test SegmentedSection
337    * exhaustion when importing the Key/AttrInfo words from the
338    * TCKEYREQ signal itself.
339    */
340   restarter.insertErrorInAllNodes(8066);
341 
342 
343   /* Now a 'short' TCKEYREQ, there will be space to import the
344    * short key, but not the AttrInfo
345    */
346   /* Start transaction on same node */
347   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
348                                              &smallKey[0],
349                                              smallKeySize));
350 
351   CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
352 
353   CHECKEQUAL(0, bigInsertOldApi->insertTuple());
354   CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
355                                        NdbDictionary::getValuePtr
356                                        (record,
357                                         smallRowBuf,
358                                         0)));
359   CHECKEQUAL(0, bigInsertOldApi->setValue(1, NdbDictionary::getValuePtr
360                                           (record,
361                                            smallRowBuf,
362                                            1)));
363 
364   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
365 
366   /* ZGET_DATABUF_ERR expected */
367   CHECKEQUAL(218, trans->getNdbError().code);
368 
369   trans->close();
370 
371   /* Change error insert so that there are no SectionSegments
372    * This will cause failure when attempting to import the
373    * KeyInfo from the TCKEYREQ
374    */
375   restarter.insertErrorInAllNodes(8067);
376 
377   /* Now a 'short' TCKEYREQ - there will be no space to import the key */
378   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
379                                              &smallKey[0],
380                                              smallKeySize));
381 
382   CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
383 
384   CHECKEQUAL(0, bigInsertOldApi->insertTuple());
385   CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
386                                        NdbDictionary::getValuePtr
387                                        (record,
388                                         smallRowBuf,
389                                         0)));
390   CHECKEQUAL(0, bigInsertOldApi->setValue(1,
391                                           NdbDictionary::getValuePtr
392                                           (record,
393                                            smallRowBuf,
394                                            1)));
395 
396   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
397 
398   /* ZGET_DATABUF_ERR expected */
399   CHECKEQUAL(218, trans->getNdbError().code);
400 
401   trans->close();
402 #endif
403 
404   /* Finished with error insert, cleanup the error insertion
405    * Error insert 8068 will free the hoarded segments
406    */
407   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
408                                              &smallKey[0],
409                                              smallKeySize));
410 
411   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
412                                           record,
413                                           ctx->getTab(),
414                                           smallRowBuf,
415                                           &restarter,
416                                           8068));
417 
418   trans->execute(NdbTransaction::Rollback);
419 
420   CHECKEQUAL(0, trans->getNdbError().code);
421 
422   trans->close();
423 
424   return NDBT_OK;
425 }
426 
427 /* Test for correct behaviour using unique key operations
428  * when an NDBD node's SegmentedSection pool is exhausted.
429  */
430 int testSegmentedSectionIx(NDBT_Context* ctx, NDBT_Step* step){
431   /*
432    * Signal type       Exhausted @              How
433    * -----------------------------------------------------
434    * Long TCINDXREQ    Initial import           Consume + send
435    * Long TCINDXREQ    Build second TCKEYREQ    Consume + send short
436    *                                             w. long base key
437    */
438   /* We will generate :
439    *   10 SS left :
440    *     Long IndexReq with too-long Key/AttrInfo
441    *   1 SS left :
442    *     Long IndexReq read with a short Key + AttrInfo mapping to
443    *       a long base table Key
444    */
445   /* We just run on one table */
446   if (strcmp(ctx->getTab()->getName(), "WIDE_2COL_IX") != 0)
447     return NDBT_OK;
448 
449   const char* indexName= "WIDE_2COL_IX$NDBT_IDX0";
450   const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
451   const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
452   const Uint32 maxIndexKeyBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytesIndex;
453   /* We want the key to fill 6 Segmented Sections, each of 60 32-bit
454    * words, with the 2 byte length prefix included in that space.
455    * (We don't want to use 10 Segmented Sections as in some scenarios TUP
456    *  uses Segmented Sections when sending results, and if we use TUP on
457    *  the same node, the exhaustion will occur in TUP, which is not what
458    *  we're testing)
459    */
460   const Uint32 mediumPrimaryKeyBytes= (6* 60 * 4) - 2;
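  /* i.e. 6 sections * 60 words * 4 bytes = 1440 bytes of section capacity,
   * less the 2 byte length prefix = 1438 bytes of key data
   */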
461   char smallKey[50];
462   char srcBuff[srcBuffBytes];
463   char smallRowBuf[maxRowBytes];
464   char bigKeyIxBuf[maxRowBytes];
465   char bigAttrIxBuf[maxRowBytes];
466   char bigKeyRowBuf[maxRowBytes];
467   char resultSpace[maxRowBytes];
468 
469   /* Small key for hinting to same TC */
470   Uint32 smallKeySize= setLongVarchar(&smallKey[0],
471                                       "ShortKey",
472                                       8);
473 
474   /* Large value source */
475   memset(srcBuff, 'B', srcBuffBytes);
476 
477   Ndb* pNdb= GETNDB(step);
478 
479   const NdbRecord* baseRecord= ctx->getTab()->getDefaultRecord();
480   const NdbRecord* ixRecord= pNdb->
481     getDictionary()->getIndex(indexName,
482                               ctx->getTab()->getName())->getDefaultRecord();
483 
484   /* Setup buffers
485    * Small row buffer with short key and data in base table record format
486    */
487   setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
488                                             smallRowBuf,
489                                             0),
490                  "ShortKey",
491                  8);
492   NdbDictionary::setNull(baseRecord, smallRowBuf, 0, false);
493 
494   setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
495                                             smallRowBuf,
496                                             1),
497                  "ShortData",
498                  9);
499   NdbDictionary::setNull(baseRecord, smallRowBuf, 1, false);
500 
501   /* Big index key buffer
502    * Big index key (normal row attribute) in index record format
503    * Index's key is attrid 1 from the base table
504    * This could get confusing !
505    */
506 
507   setLongVarchar(NdbDictionary::getValuePtr(ixRecord,
508                                             bigKeyIxBuf,
509                                             1),
510                  &srcBuff[0],
511                  maxIndexKeyBytes);
512   NdbDictionary::setNull(ixRecord, bigKeyIxBuf, 1, false);
513 
514   /* Big AttrInfo buffer
515    * Small key and large attrinfo in base table record format */
516   setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
517                                             bigAttrIxBuf,
518                                             0),
519                  "ShortIXKey",
520                  10);
521 
522   NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 0, false);
523 
524   setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
525                                             bigAttrIxBuf,
526                                             1),
527                  &srcBuff[0],
528                  maxIndexKeyBytes);
529   NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 1, false);
530 
531   /* Big key row buffer
532    * Medium sized key and small attrinfo (index key) in
533    * base table record format
534    */
535   setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
536                                             bigKeyRowBuf,
537                                             0),
538                  &srcBuff[0],
539                  mediumPrimaryKeyBytes);
540 
541   NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 0, false);
542 
543   setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
544                                             bigKeyRowBuf,
545                                             1),
546                  "ShortIXKey",
547                  10);
548   NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 1, false);
549 
550 
551   /* Start a transaction on a specific node */
552   NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
553                                                 &smallKey[0],
554                                                 smallKeySize);
555   /* Insert a row in the base table with a big PK, and
556    * small data (Unique IX key).  This is used later to lookup
557    * a big PK and cause overflow when reading TRANSID_AI in TC.
558    */
559   CHECKNOTNULL(trans->insertTuple(baseRecord,
560                                   bigKeyRowBuf));
561 
562   CHECKEQUAL(0, trans->execute(NdbTransaction::Commit));
563 
564   NdbRestarter restarter;
565   /* Start a transaction on a specific node */
566   trans= pNdb->startTransaction(ctx->getTab(),
567                                 &smallKey[0],
568                                 smallKeySize);
569   CHECKNOTNULL(trans);
570 
571   /* Activate error insert 8065 in this transaction, limits any
572    * single append/import to 10 sections.
573    */
574   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
575                                           baseRecord,
576                                           ctx->getTab(),
577                                           smallRowBuf,
578                                           &restarter,
579                                           8065));
580 
581   /* Ok, let's try an index read with a big index key.
582    * Since it's part of the same transaction, it'll go via
583    * the same TC.
584    */
585   const NdbOperation* bigRead= trans->readTuple(ixRecord,
586                                                 bigKeyIxBuf,
587                                                 baseRecord,
588                                                 resultSpace);
589 
590   CHECKNOTNULL(bigRead);
591 
592   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
593 
594   /* ZGET_DATABUF_ERR expected */
595   CHECKEQUAL(218, trans->getNdbError().code);
596 
597   trans->close();
598 
599 
600   /* Ok, now a long TCINDXREQ to the same TC - this
601    * has slightly different abort handling since no other
602    * operations exist in this new transaction.
603    */
604   /* Start a transaction on a specific node */
605   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
606                                              &smallKey[0],
607                                              smallKeySize));
608 
609   CHECKNOTNULL(trans->readTuple(ixRecord,
610                                 bigKeyIxBuf,
611                                 baseRecord,
612                                 resultSpace));
613 
614   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
615 
616   /* ZGET_DATABUF_ERR expected */
617   CHECKEQUAL(218, trans->getNdbError().code);
618 
619   trans->close();
620 
621   /* Now a TCINDXREQ that overflows, but is not the last in the
622    * batch, what happens to the other TCINDXREQ in the batch?
623    */
624   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
625                                              &smallKey[0],
626                                              smallKeySize));
627 
628   CHECKNOTNULL(trans->readTuple(ixRecord,
629                                 bigKeyIxBuf,
630                                 baseRecord,
631                                 resultSpace));
632   /* Another read */
633   CHECKNOTNULL(trans->readTuple(ixRecord,
634                                 bigKeyIxBuf,
635                                 baseRecord,
636                                 resultSpace));
637 
638   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
639 
640   /* ZGET_DATABUF_ERR expected */
641   CHECKEQUAL(218, trans->getNdbError().code);
642 
643   trans->close();
644 
645 
646   /* Next we read a tuple with a large primary key via the unique
647    * index.  The index read itself should be fine, but
648    * pulling in the base table PK will cause an abort due to
649    * overflow when handling TRANSID_AI.
650    */
651   /* Start a transaction on a specific node */
652   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
653                                              &smallKey[0],
654                                              smallKeySize));
655 
656   /* Activate error insert 8066 in this transaction, limits a
657    * single import/append to 1 section.
658    * Note that the TRANSID_AI is received by TC as a short-signal
659    * train, so no single append is large, but when the first
660    * segment is used and append starts on the second, it will
661    * fail.
662    */
663   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
664                                           baseRecord,
665                                           ctx->getTab(),
666                                           smallRowBuf,
667                                           &restarter,
668                                           8066));
669   CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
670 
671   CHECKNOTNULL(bigRead= trans->readTuple(ixRecord,
672                                          bigAttrIxBuf,
673                                          baseRecord,
674                                          resultSpace));
675 
676   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
677 
678   /* ZGET_DATABUF_ERR expected */
679   CHECKEQUAL(218, trans->getNdbError().code);
680 
681   trans->close();
682 
683   // TODO Move short signal testing to testUpgrade
684 #if 0
685   /*
686    * Short TCINDXREQ   KeyInfo accumulate       Consume + send long
687    *                     (TCINDXREQ + KEYINFO)
688    * Short TCINDXREQ   AttrInfo accumulate      Consume + send short key
689    *                                             + long AI
690    *                     (TCINDXREQ + ATTRINFO)
691    */
692   /* Now try with a 'short' TCINDXREQ, generated using the old Api
693    * with a big index key value
694    */
695   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
696                                              &smallKey[0],
697                                              smallKeySize));
698 
699   const NdbDictionary::Index* index;
700   CHECKNOTNULL(index= pNdb->getDictionary()->
701                getIndex(indexName,
702                         ctx->getTab()->getName()));
703 
704   NdbIndexOperation* bigReadOldApi;
705   CHECKNOTNULL(bigReadOldApi= trans->getNdbIndexOperation(index));
706 
707   CHECKEQUAL(0, bigReadOldApi->readTuple());
708   /* We use the attribute id of the index, not the base table here */
709   CHECKEQUAL(0, bigReadOldApi->equal((Uint32)0,
710                                      NdbDictionary::getValuePtr
711                                      (ixRecord,
712                                       bigKeyIxBuf,
713                                       1)));
714 
715   CHECKNOTNULL(bigReadOldApi->getValue((Uint32)1));
716 
717   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
718 
719   /* ZGET_DATABUF_ERR expected */
720   CHECKEQUAL(218, trans->getNdbError().code);
721 
722   trans->close();
723 
724   /* Now try with a 'short' TCINDXREQ, generated using the old Api
725    * with a big attrinfo value
726    */
727   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
728                                              &smallKey[0],
729                                              smallKeySize));
730 
731   NdbIndexOperation* bigUpdateOldApi;
732   CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
733 
734   CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
735   /* We use the attribute id of the index, not the base table here */
736   CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
737                                        NdbDictionary::getValuePtr
738                                        (baseRecord,
739                                         smallRowBuf,
740                                         1)));
741 
742   CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
743                                           NdbDictionary::getValuePtr
744                                           (baseRecord,
745                                            bigAttrIxBuf,
746                                            1)));
747 
748   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
749 
750   /* ZGET_DATABUF_ERR expected */
751   CHECKEQUAL(218, trans->getNdbError().code);
752 
753   trans->close();
754 
755   /* Change error insert so that next TCINDXREQ will grab
756    * all but one SegmentedSection
757    */
758   restarter.insertErrorInAllNodes(8066);
759 
760   /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
761    * can be imported, but the ATTRINFO can't
762    */
763   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
764                                              &smallKey[0],
765                                              smallKeySize));
766 
767   CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
768 
769   CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
770   /* We use the attribute id of the index, not the base table here */
771   CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
772                                        NdbDictionary::getValuePtr
773                                        (baseRecord,
774                                         smallRowBuf,
775                                         1)));
776 
777   CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
778                                           NdbDictionary::getValuePtr
779                                           (baseRecord,
780                                            bigAttrIxBuf,
781                                            1)));
782 
783   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
784 
785   /* ZGET_DATABUF_ERR expected */
786   CHECKEQUAL(218, trans->getNdbError().code);
787 
788   trans->close();
789 
790   /* Change error insert so that there are no SectionSegments */
791   restarter.insertErrorInAllNodes(8067);
792 
793   /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
794    * can't be imported
795    */
796   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
797                                              &smallKey[0],
798                                              smallKeySize));
799 
800   CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
801 
802   CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
803   /* We use the attribute id of the index, not the base table here */
804   CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
805                                        NdbDictionary::getValuePtr
806                                        (baseRecord,
807                                         smallRowBuf,
808                                         1)));
809 
810   CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
811                                           NdbDictionary::getValuePtr
812                                           (baseRecord,
813                                            bigAttrIxBuf,
814                                            1)));
815 
816   CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
817 
818   /* ZGET_DATABUF_ERR expected */
819   CHECKEQUAL(218, trans->getNdbError().code);
820 
821   trans->close();
822 
823 #endif
824 
825   /* Finished with error insert, cleanup the error insertion */
826   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
827                                              &smallKey[0],
828                                              smallKeySize));
829 
830   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
831                                           baseRecord,
832                                           ctx->getTab(),
833                                           smallRowBuf,
834                                           &restarter,
835                                           8068));
836 
837   trans->execute(NdbTransaction::Rollback);
838 
839   CHECKEQUAL(0, trans->getNdbError().code);
840 
841   trans->close();
842 
843   return NDBT_OK;
844 }
845 
846 
847 int testSegmentedSectionScan(NDBT_Context* ctx, NDBT_Step* step){
848   /* Test that TC handling of segmented section exhaustion is
849    * correct.
850    * Since the NDB API always sends long requests, that is all
851    * that we test.
852    */
853   /* We just run on one table */
854   if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
855     return NDBT_OK;
856 
857   const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
858   char smallKey[50];
859   char smallRowBuf[maxRowBytes];
860 
861   Uint32 smallKeySize= setLongVarchar(&smallKey[0],
862                                       "ShortKey",
863                                       8);
864 
865   const NdbRecord* record= ctx->getTab()->getDefaultRecord();
866 
867   /* Setup buffers
868    * Small row buffer with small key and small data
869    */
870   setLongVarchar(NdbDictionary::getValuePtr(record,
871                                             smallRowBuf,
872                                             0),
873                  "ShortKey",
874                  8);
875   NdbDictionary::setNull(record, smallRowBuf, 0, false);
876 
877   setLongVarchar(NdbDictionary::getValuePtr(record,
878                                             smallRowBuf,
879                                             1),
880                  "ShortData",
881                  9);
882   NdbDictionary::setNull(record, smallRowBuf, 1, false);
883 
884   NdbRestarter restarter;
885   Ndb* pNdb= GETNDB(step);
886 
887   /* Start a transaction on a specific node */
888   NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
889                                                 &smallKey[0],
890                                                 smallKeySize);
891   CHECKNOTNULL(trans);
892 
893   /* Activate error insert 8066 in this transaction, limits a
894    * single import/append to 1 section.
895    */
896   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
897                                           record,
898                                           ctx->getTab(),
899                                           smallRowBuf,
900                                           &restarter,
901                                           8066));
902 
903   /* A scan will always send 2 long sections (Receiver Ids,
904    * AttrInfo)
905    * Let's start a scan with > 2400 bytes of
906    * ATTRINFO and see what happens
907    */
908   NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());
909 
910   CHECKNOTNULL(scan);
911 
912   CHECKEQUAL(0, scan->readTuples());
913 
914   /* Create a particularly useless program */
915   NdbInterpretedCode prog;
916 
917   for (Uint32 w=0; w < 2500; w++)
918     CHECKEQUAL(0, prog.load_const_null(1));
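  /* 2500 instructions give several kB of interpreted-program AttrInfo,
   * far more than the single 60-word (240 byte) section permitted by
   * error insert 8066
   */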
919 
920   CHECKEQUAL(0, prog.interpret_exit_ok());
921   CHECKEQUAL(0, prog.finalise());
922 
923   CHECKEQUAL(0, scan->setInterpretedCode(&prog));
924 
925   /* The API does not report scan request failure here; it surfaces via nextResult() */
926   CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
927 
928   CHECKEQUAL(0, trans->getNdbError().code);
929 
930   CHECKEQUAL(-1, scan->nextResult());
931 
932   CHECKEQUAL(217, scan->getNdbError().code);
933 
934   trans->close();
935 
936   /* Finished with error insert, cleanup the error insertion */
937   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
938                                              &smallKey[0],
939                                              smallKeySize));
940 
941   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
942                                           record,
943                                           ctx->getTab(),
944                                           smallRowBuf,
945                                           &restarter,
946                                           8068));
947 
948   CHECKEQUAL(0, trans->execute(NdbTransaction::Rollback));
949 
950   CHECKEQUAL(0, trans->getNdbError().code);
951 
952   trans->close();
953 
954   return NDBT_OK;
955 }
956 
957 int testDropSignalFragments(NDBT_Context* ctx, NDBT_Step* step){
958   /* Segmented section exhaustion results in dropped signals.
959    * Fragmented signals split one logical signal over multiple
960    * physical signals (to cope with the MAX_SIGNAL_LENGTH=32kB
961    * limitation).
962    * This testcase checks that when individual signals comprising
963    * a fragmented signal (in this case SCANTABREQ) are dropped, the
964    * system behaves correctly.
965    * Correct behaviour is to behave in the same way as if the signal
966    * was not fragmented, and for SCANTABREQ, to return a temporary
967    * resource error.
968    */
969   NdbRestarter restarter;
970   Ndb* pNdb= GETNDB(step);
971 
972   /* SEND > ((2 * MAX_SEND_MESSAGE_BYTESIZE) + SOME EXTRA)
973    * This way we get at least 3 fragments
974    * However, as this is generally > 64kB, it's too much AttrInfo for
975    * a ScanTabReq, so the 'success' case returns error 874
976    */
977   const Uint32 PROG_WORDS= 16500;
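  /* 16500 words * 4 bytes = 66000 bytes (~64.5 kB) of interpreted program,
   * comfortably beyond two maximum-sized sends, so the request is split
   * into at least 3 fragments
   */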
978 
979   struct SubCase
980   {
981     Uint32 errorInsertCode;
982     int expectedRc;
983   };
984   const Uint32 numSubCases= 5;
985   const SubCase cases[numSubCases]=
986   /* Error insert   Scanrc */
987     {{          0,     874},  // Normal, success which gives too much AI error
988      {       8074,     217},  // Drop first fragment -> error 217
989      {       8075,     217},  // Drop middle fragment(s) -> error 217
990      {       8076,     217},  // Drop last fragment -> error 217
991      {       8077,     217}}; // Drop all fragments -> error 217
992   const Uint32 numIterations= 50;
993 
994   Uint32 buff[ PROG_WORDS + 10 ]; // 10 extra for final 'return' etc.
995 
996   for (Uint32 iteration=0; iteration < (numIterations * numSubCases); iteration++)
997   {
998     /* Start a transaction */
999     NdbTransaction* trans= pNdb->startTransaction();
1000     CHECKNOTNULL(trans);
1001 
1002     SubCase subcase= cases[iteration % numSubCases];
1003 
1004     Uint32 errorInsertVal= subcase.errorInsertCode;
1005     // printf("Inserting error : %u\n", errorInsertVal);
1006     /* We insert the error twice, to bias races between
1007      * error-insert propagation and the succeeding scan
1008      * in favour of error insert winning!
1009      * This problem needs a more general fix
1010      */
1011     CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));
1012     CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));
1013 
1014     NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());
1015 
1016     CHECKNOTNULL(scan);
1017 
1018     CHECKEQUAL(0, scan->readTuples());
1019 
1020     /* Create a large program, to give a large SCANTABREQ */
1021     NdbInterpretedCode prog(ctx->getTab(), buff, PROG_WORDS + 10);
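    /* Build the program in the caller-supplied 'buff' declared above; it is
     * sized for PROG_WORDS plus the final exit/return instructions
     */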
1022 
1023     for (Uint32 w=0; w < PROG_WORDS; w++)
1024       CHECKEQUAL(0, prog.load_const_null(1));
1025 
1026     CHECKEQUAL(0, prog.interpret_exit_ok());
1027     CHECKEQUAL(0, prog.finalise());
1028 
1029     CHECKEQUAL(0, scan->setInterpretedCode(&prog));
1030 
1031     /* The API does not report scan request failure here; it surfaces via nextResult() */
1032     CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
1033 
1034     CHECKEQUAL(0, trans->getNdbError().code);
1035 
1036     CHECKEQUAL(-1, scan->nextResult());
1037 
1038     int expectedResult= subcase.expectedRc;
1039     CHECKEQUAL(expectedResult, scan->getNdbError().code);
1040 
1041     scan->close();
1042 
1043     trans->close();
1044   }
1045 
1046   restarter.insertErrorInAllNodes(0);
1047 
1048   return NDBT_OK;
1049 }
1050 
1051 int create100Tables(NDBT_Context* ctx, NDBT_Step* step)
1052 {
1053   Ndb* pNdb = GETNDB(step);
1054   const NdbDictionary::Table* pTab= ctx->getTab();
1055 
1056   /* Run as a 'T1' testcase - do nothing for other tables */
1057   if (strcmp(pTab->getName(), "T1") != 0)
1058     return NDBT_OK;
1059 
1060   for (Uint32 t=0; t < 100; t++)
1061   {
1062     char tabnameBuff[10];
1063     snprintf(tabnameBuff, sizeof(tabnameBuff), "TAB%u", t);
1064 
1065     NdbDictionary::Table tab;
1066     tab.setName(tabnameBuff);
1067     NdbDictionary::Column pk;
1068     pk.setName("PK");
1069     pk.setType(NdbDictionary::Column::Varchar);
1070     pk.setLength(20);
1071     pk.setNullable(false);
1072     pk.setPrimaryKey(true);
1073     tab.addColumn(pk);
1074 
1075     pNdb->getDictionary()->dropTable(tab.getName());
1076     if(pNdb->getDictionary()->createTable(tab) != 0)
1077     {
1078       ndbout << "Create table failed with error : "
1079              << pNdb->getDictionary()->getNdbError().code
1080              << " "
1081              << pNdb->getDictionary()->getNdbError().message
1082              << endl;
1083       return NDBT_FAILED;
1084     }
1085 
1086     ndbout << "Created table " << tabnameBuff << endl;
1087   }
1088 
1089   return NDBT_OK;
1090 }
1091 
1092 int drop100Tables(NDBT_Context* ctx, NDBT_Step* step)
1093 {
1094   Ndb* pNdb = GETNDB(step);
1095   const NdbDictionary::Table* pTab= ctx->getTab();
1096 
1097   /* Run as a 'T1' testcase - do nothing for other tables */
1098   if (strcmp(pTab->getName(), "T1") != 0)
1099     return NDBT_OK;
1100 
1101   for (Uint32 t=0; t < 100; t++)
1102   {
1103     char tabnameBuff[10];
1104     snprintf(tabnameBuff, sizeof(tabnameBuff), "TAB%u", t);
1105 
1106     if (pNdb->getDictionary()->dropTable(tabnameBuff) != 0)
1107     {
1108       ndbout << "Drop table failed with error : "
1109              << pNdb->getDictionary()->getNdbError().code
1110              << " "
1111              << pNdb->getDictionary()->getNdbError().message
1112              << endl;
1113     }
1114     else
1115     {
1116       ndbout << "Dropped table " << tabnameBuff << endl;
1117     }
1118   }
1119 
1120   return NDBT_OK;
1121 }
1122 
1123 int dropTable(NDBT_Context* ctx, NDBT_Step* step, Uint32 num)
1124 {
1125   Ndb* pNdb = GETNDB(step);
1126   const NdbDictionary::Table* pTab= ctx->getTab();
1127 
1128   /* Run as a 'T1' testcase - do nothing for other tables */
1129   if (strcmp(pTab->getName(), "T1") != 0)
1130     return NDBT_OK;
1131 
1132   char tabnameBuff[10];
1133   snprintf(tabnameBuff, sizeof(tabnameBuff), "TAB%u", num);
1134 
1135   if (pNdb->getDictionary()->dropTable(tabnameBuff) != 0)
1136   {
1137     ndbout << "Drop table failed with error : "
1138            << pNdb->getDictionary()->getNdbError().code
1139            << " "
1140            << pNdb->getDictionary()->getNdbError().message
1141            << endl;
1142   }
1143   else
1144   {
1145     ndbout << "Dropped table " << tabnameBuff << endl;
1146   }
1147 
1148   return NDBT_OK;
1149 }
1150 
1151 
1152 enum Scenarios
1153 {
1154 //  NORMAL,  // Commented to save some time.
1155   DROP_TABLE,
1156   RESTART_MASTER,
1157   RESTART_SLAVE,
1158   NUM_SCENARIOS
1159 };
1160 
1161 
1162 enum Tasks
1163 {
1164   WAIT = 0,
1165   DROP_TABLE_REQ = 1,
1166   MASTER_RESTART_REQ = 2,
1167   SLAVE_RESTART_REQ = 3
1168 };
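
/* The worker and driver steps below coordinate through NDBT_Context
 * properties : the driver sets DIHWritesRequestType and raises
 * DIHWritesRequest to 1; the worker performs the task, clears the type and
 * sets DIHWritesRequest to 2, which the driver waits for.
 * Hedged sketch of the driver side (illustration only, not used below) :
 */
#if 0
static void requestWorkerTask(NDBT_Context* ctx, Tasks task)
{
  ctx->setProperty("DIHWritesRequestType", (Uint32) task);
  ctx->setProperty("DIHWritesRequest", (Uint32) 1);       /* hand over */
  ctx->getPropertyWait("DIHWritesRequest", 2);            /* wait for done */
}
#endif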
1169 
1170 int testWorker(NDBT_Context* ctx, NDBT_Step* step)
1171 {
1172   /* Run as a 'T1' testcase - do nothing for other tables */
1173   if (strcmp(ctx->getTab()->getName(), "T1") != 0)
1174     return NDBT_OK;
1175 
1176   /* Worker step, run in a separate thread to perform
1177    * blocking activities.
1178    * The stalled DIH table definition flush delays completion
1179    * of the drop table / node restarts, so these must be
1180    * driven from a separate thread to avoid deadlocking the
1181    * main test step.
1182    */
1183 
1184   while (!ctx->isTestStopped())
1185   {
1186     ndbout_c("Worker : waiting for request...");
1187     ctx->getPropertyWait("DIHWritesRequest", 1);
1188 
1189     if (!ctx->isTestStopped())
1190     {
1191       Uint32 req = ctx->getProperty("DIHWritesRequestType", (Uint32)0);
1192 
1193       switch ((Tasks) req)
1194       {
1195       case DROP_TABLE_REQ:
1196       {
1197         /* Drop table */
1198         ndbout_c("Worker : dropping table");
1199         if (dropTable(ctx, step, 2) != NDBT_OK)
1200         {
1201           return NDBT_FAILED;
1202         }
1203         ndbout_c("Worker : table dropped.");
1204         break;
1205       }
1206       case MASTER_RESTART_REQ:
1207       {
1208         ndbout_c("Worker : restarting Master");
1209 
1210         NdbRestarter restarter;
1211         int master_nodeid = restarter.getMasterNodeId();
1212         ndbout_c("Worker : Restarting Master (%d)...", master_nodeid);
1213         if (restarter.restartOneDbNode2(master_nodeid,
1214                                         NdbRestarter::NRRF_NOSTART |
1215                                         NdbRestarter::NRRF_FORCE |
1216                                         NdbRestarter::NRRF_ABORT) ||
1217             restarter.waitNodesNoStart(&master_nodeid, 1) ||
1218             restarter.startAll())
1219         {
1220           ndbout_c("Worker : Error restarting Master.");
1221           return NDBT_FAILED;
1222         }
1223         ndbout_c("Worker : Waiting for master to recover...");
1224         if (restarter.waitNodesStarted(&master_nodeid, 1))
1225         {
1226           ndbout_c("Worker : Error waiting for Master restart");
1227           return NDBT_FAILED;
1228         }
1229         ndbout_c("Worker : Master recovered.");
1230         break;
1231       }
1232       case SLAVE_RESTART_REQ:
1233       {
1234         NdbRestarter restarter;
1235         int slave_nodeid = restarter.getRandomNotMasterNodeId(rand());
1236         ndbout_c("Worker : Restarting non-master (%d)...", slave_nodeid);
1237         if (restarter.restartOneDbNode2(slave_nodeid,
1238                                         NdbRestarter::NRRF_NOSTART |
1239                                         NdbRestarter::NRRF_FORCE |
1240                                         NdbRestarter::NRRF_ABORT) ||
1241             restarter.waitNodesNoStart(&slave_nodeid, 1) ||
1242             restarter.startAll())
1243         {
1244           ndbout_c("Worker : Error restarting Slave.");
1245           return NDBT_FAILED;
1246         }
1247         ndbout_c("Worker : Waiting for slave to recover...");
1248         if (restarter.waitNodesStarted(&slave_nodeid, 1))
1249         {
1250           ndbout_c("Worker : Error waiting for Slave restart");
1251           return NDBT_FAILED;
1252         }
1253         ndbout_c("Worker : Slave recovered.");
1254         break;
1255       }
1256       default:
1257       {
1258         break;
1259       }
1260       }
1261     }
1262     ctx->setProperty("DIHWritesRequestType", (Uint32) 0);
1263     ctx->setProperty("DIHWritesRequest", (Uint32) 2);
1264   }
1265 
1266   ndbout_c("Worker, done.");
1267   return NDBT_OK;
1268 }
1269 
1270 int testSlowDihFileWrites(NDBT_Context* ctx, NDBT_Step* step)
1271 {
1272   /* Testcase checks behaviour with slow flushing of DIH table definitions.
1273    * This caused problems in the past by exhausting the DIH page pool;
1274    * now there's a concurrent operations limit.
1275    * Check that it copes with many queued ops and parallel drop/node restarts.
1276    */
1277 
1278   /* Run as a 'T1' testcase - do nothing for other tables */
1279   if (strcmp(ctx->getTab()->getName(), "T1") != 0)
1280     return NDBT_OK;
1281 
1282   /* 1. Activate slow write error insert
1283    * 2. Trigger LCP
1284    * 3. Wait some time, periodically producing info on
1285    *    the internal state
1286    * 4. Perform some parallel action (drop table/node restarts)
1287    * 5. Wait some time, periodically producing info on
1288    *    the internal state
1289    * 6. Clear the error insert
1290    * 7. Wait a little longer
1291    * 8. Done.
1292    */
1293   NdbRestarter restarter;
1294 
1295   for (Uint32 scenario = 0;  scenario < NUM_SCENARIOS; scenario++)
1296   {
1297     ndbout_c("Inserting error 7235");
1298     restarter.insertErrorInAllNodes(7235);
1299 
1300     ndbout_c("Triggering LCP");
1301     int dumpArg = 7099;
1302     restarter.dumpStateAllNodes(&dumpArg, 1);
1303 
1304     const Uint32 periodSeconds = 10;
1305     Uint32 waitPeriods = 6;
1306     dumpArg = 7032;
1307 
1308     for (Uint32 p=0; p<waitPeriods; p++)
1309     {
1310       if (p == 3)
1311       {
1312         switch ((Scenarios) scenario)
1313         {
1314         case DROP_TABLE:
1315         {
1316           /* Drop one of the early-created tables */
1317           ndbout_c("Requesting DROP TABLE");
1318           ctx->setProperty("DIHWritesRequestType", (Uint32) DROP_TABLE_REQ);
1319           ctx->setProperty("DIHWritesRequest", (Uint32) 1);
1320           break;
1321         }
1322         case RESTART_MASTER:
1323         {
1324           ndbout_c("Requesting Master restart");
1325           ctx->setProperty("DIHWritesRequestType", (Uint32) MASTER_RESTART_REQ);
1326           ctx->setProperty("DIHWritesRequest", (Uint32) 1);
1327 
1328           break;
1329         }
1330         case RESTART_SLAVE:
1331         {
1332           ndbout_c("Requesting Slave restart");
1333           ctx->setProperty("DIHWritesRequestType", (Uint32) SLAVE_RESTART_REQ);
1334           ctx->setProperty("DIHWritesRequest", (Uint32) 1);
1335 
1336           break;
1337         }
1338         default:
1339           break;
1340         }
1341       }
1342 
1343       ndbout_c("Dumping DIH page info to ndbd stdout");
1344       restarter.dumpStateAllNodes(&dumpArg, 1);
1345       NdbSleep_MilliSleep(periodSeconds * 1000);
1346     }
1347 
1348     ndbout_c("Clearing error insert...");
1349     restarter.insertErrorInAllNodes(0);
1350 
1351     waitPeriods = 2;
1352     for (Uint32 p=0; p<waitPeriods; p++)
1353     {
1354       ndbout_c("Dumping DIH page info to ndbd stdout");
1355       restarter.dumpStateAllNodes(&dumpArg, 1);
1356       NdbSleep_MilliSleep(periodSeconds * 1000);
1357     }
1358 
1359     ndbout_c("Waiting for worker to finish task...");
1360     ctx->getPropertyWait("DIHWritesRequest", 2);
1361 
1362     if (ctx->isTestStopped())
1363       return NDBT_OK;
1364 
1365     ndbout_c("Done.");
1366   }
1367 
1368   /* Finish up */
1369   ctx->stopTest();
1370 
1371   return NDBT_OK;
1372 }
1373 
1374 
1375 NDBT_TESTSUITE(testLimits);
1376 
1377 TESTCASE("ExhaustSegmentedSectionPk",
1378          "Test behaviour at Segmented Section exhaustion for PK"){
1379   INITIALIZER(testSegmentedSectionPk);
1380 }
1381 
1382 TESTCASE("ExhaustSegmentedSectionIX",
1383          "Test behaviour at Segmented Section exhaustion for Unique index"){
1384   INITIALIZER(testSegmentedSectionIx);
1385 }
1386 TESTCASE("ExhaustSegmentedSectionScan",
1387          "Test behaviour at Segmented Section exhaustion for Scan"){
1388   INITIALIZER(testSegmentedSectionScan);
1389 }
1390 
1391 TESTCASE("DropSignalFragments",
1392          "Test behaviour of Segmented Section exhaustion with fragmented signals"){
1393   INITIALIZER(testDropSignalFragments);
1394 }
1395 
1396 TESTCASE("SlowDihFileWrites",
1397          "Test behaviour of slow Dih table file writes")
1398 {
1399   INITIALIZER(create100Tables);
1400   STEP(testWorker);
1401   STEP(testSlowDihFileWrites);
1402   FINALIZER(drop100Tables);
1403 }
1404 
1405 NDBT_TESTSUITE_END(testLimits);
1406 
1407 int main(int argc, const char** argv){
1408   ndb_init();
1409   NDBT_TESTSUITE_INSTANCE(testLimits);
1410   return testLimits.execute(argc, argv);
1411 }
1412