/*
   Copyright (c) 2008, 2019, Oracle and/or its affiliates.  All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation.  The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */

#include <NDBT.hpp>
#include <NDBT_Test.hpp>
#include <NdbRestarter.hpp>

#define CHECKNOTNULL(p) if ((p) == NULL) {          \
    ndbout << "Error at line " << __LINE__ << endl; \
    NDB_ERR(trans->getNdbError());                  \
    trans->close();                                 \
    return NDBT_FAILED; }

#define CHECKEQUAL(v, e) if ((e) != (v)) {            \
    ndbout << "Error at line " << __LINE__ <<         \
      " expected " << v << endl;                      \
    NDB_ERR(trans->getNdbError());                    \
    trans->close();                                   \
    return NDBT_FAILED; }

#define CHECK(v) if (!(v)) {                          \
    ndbout << "Error at line " << __LINE__ <<         \
      endl;                                           \
    return NDBT_FAILED; }
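
/* Note that CHECKNOTNULL and CHECKEQUAL assume a transaction pointer named
 * 'trans' is in scope, as all callers below provide one.
 * Illustrative usage (a sketch mirroring the tests below) :
 *   CHECKNOTNULL(trans = pNdb->startTransaction());
 *   CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
 */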

/* Setup memory as a long Varchar with 2 bytes of
 * length information
 */
Uint32 setLongVarchar(char* where, const char* what, Uint32 sz)
{
  where[0]=sz & 0xff;
  where[1]=(sz >> 8) & 0xff;
  memcpy(&where[2], what, sz);
  return (sz + 2);
}
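
/* Illustrative layout (a sketch, not used by the tests) :
 *   char buf[2 + 8];
 *   Uint32 len = setLongVarchar(buf, "ShortKey", 8);
 *   // buf[0] == 0x08, buf[1] == 0x00 : little-endian 2-byte length prefix
 *   // len == 10 : total bytes written, length prefix included
 */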


/* Activate the given error insert in TC block
 * This is used for error insertion where a TCKEYREQ
 * is required to activate the error
 */
int activateErrorInsert(NdbTransaction* trans,
                        const NdbRecord* record,
                        const NdbDictionary::Table* tab,
                        const char* buf,
                        NdbRestarter* restarter,
                        Uint32 val)
{
  /* We insert the error twice to avoid what appear to be
   * races between the error insert and the subsequent
   * tests.
   * Alternatively we could sleep here.
   */
  if (restarter->insertErrorInAllNodes(val) != 0){
    g_err << "error insert 1 (" << val << ") failed" << endl;
    return NDBT_FAILED;
  }
  if (restarter->insertErrorInAllNodes(val) != 0){
    g_err << "error insert 2 (" << val << ") failed" << endl;
    return NDBT_FAILED;
  }

  NdbOperation* insert= trans->getNdbOperation(tab);

  CHECKNOTNULL(insert);

  CHECKEQUAL(0, insert->insertTuple());

  CHECKEQUAL(0, insert->equal((Uint32) 0,
                              NdbDictionary::getValuePtr
                              (record,
                               buf,
                               0)));
  CHECKEQUAL(0, insert->setValue(1,
                                 NdbDictionary::getValuePtr
                                 (record,
                                  buf,
                                  1)));

  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

  CHECKEQUAL(0, trans->getNdbError().code);

  return NDBT_OK;
}
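
/* Typical usage (a sketch taken from the tests below) : activate the error
 * insert within an already-started transaction so that the dummy TCKEYREQ
 * reaches the same TC instance that the following operations will use :
 *   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
 *                                           record,
 *                                           ctx->getTab(),
 *                                           smallRowBuf,
 *                                           &restarter,
 *                                           8065));
 */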

/* Test for correct behaviour using primary key operations
 * when an NDBD node's SegmentedSection pool is exhausted.
 */
int testSegmentedSectionPk(NDBT_Context* ctx, NDBT_Step* step){
  /*
   * Signal type       Exhausted @              How
   * -----------------------------------------------------
   * Long TCKEYREQ     Initial import           Consume + send
   * Long TCKEYREQ     Initial import, not first
   *                     TCKEYREQ in batch      Consume + send
   * Long TCKEYREQ     Initial import, not last
   *                     TCKEYREQ in batch      Consume + send
   * No testing of short TCKEYREQ variants as they cannot be
   * generated in mysql-5.1-telco-6.4+
   * TODO : Add short variant testing to testUpgrade.
   */

  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
    return NDBT_OK;

  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  const Uint32 maxKeyBytes= NDBT_Tables::MaxVarTypeKeyBytes;
  const Uint32 maxAttrBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytes;
  const Uint32 srcBuffBytes= MAX(maxKeyBytes,maxAttrBytes);
  char smallKey[50];
  char srcBuff[srcBuffBytes];
  char smallRowBuf[maxRowBytes];
  char bigKeyRowBuf[maxRowBytes];
  char bigAttrRowBuf[maxRowBytes];

  /* Small key for hinting to same TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  /* Large value source */
  memset(srcBuff, 'B', srcBuffBytes);

  const NdbRecord* record= ctx->getTab()->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with small key and small data
   */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, smallRowBuf, 1, false);

  /* Big key buffer with big key and small data*/
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigKeyRowBuf,
                                            0),
                 &srcBuff[0],
                 maxKeyBytes);
  NdbDictionary::setNull(record, bigKeyRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigKeyRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, bigKeyRowBuf, 1, false);

  /* Big AttrInfo buffer with small key and big data */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigAttrRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, bigAttrRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigAttrRowBuf,
                                            1),
                 &srcBuff[0],
                 maxAttrBytes);
  NdbDictionary::setNull(record, bigAttrRowBuf, 1, false);

  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8065 in this transaction, limits
   * any single import/append to 1 section
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8065));

  /* Ok, let's try an insert with a key bigger than 1 section.
   * Since it's part of the same transaction, it'll go via
   * the same TC.
   */
  const NdbOperation* bigInsert = trans->insertTuple(record, bigKeyRowBuf);

  CHECKNOTNULL(bigInsert);

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Ok, now a long TCKEYREQ to the same TC - this
   * has slightly different abort handling since no other
   * operations exist in this new transaction.
   * We also change it so that import overflow occurs
   * on the AttrInfo section
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));


  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));

  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Ok, now a long TCKEYREQ where we run out of SegmentedSections
   * on the first TCKEYREQ, but there are other TCKEYREQs following
   * in the same batch.  Check that abort handling is correct
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  /* First op in batch, will cause overflow */
  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));

  /* Second op in batch, what happens to it? */
  const NdbOperation* secondOp;
  CHECKNOTNULL(secondOp = trans->insertTuple(record, bigAttrRowBuf));


  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Now try with a 'short' TCKEYREQ, generated using the old Api
   * with a big key value
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  NdbOperation* bigInsertOldApi;
  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        bigKeyRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
                                          NdbDictionary::getValuePtr
                                          (record,
                                           bigKeyRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Now try with a 'short' TCKEYREQ, generated using the old Api
   * with a big data value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        bigAttrRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
                                          NdbDictionary::getValuePtr
                                          (record,
                                           bigAttrRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  // TODO : Add code to testUpgrade
#if 0
  /*
   * Short TCKEYREQ    KeyInfo accumulate       Consume + send long
   *                     (TCKEYREQ + KEYINFO)
   * Short TCKEYREQ    AttrInfo accumulate      Consume + send short key
   *                                             + long AI
   *                      (TCKEYREQ + ATTRINFO)
   */
  /* Change error insert so that next TCKEYREQ will grab
   * all but one SegmentedSection so that we can then test SegmentedSection
   * exhaustion when importing the Key/AttrInfo words from the
   * TCKEYREQ signal itself.
   */
  restarter.insertErrorInAllNodes(8066);


  /* Now a 'short' TCKEYREQ, there will be space to import the
   * short key, but not the AttrInfo
   */
  /* Start transaction on same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        smallRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1, NdbDictionary::getValuePtr
                                          (record,
                                           smallRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Change error insert so that there are no SectionSegments
   * This will cause failure when attempting to import the
   * KeyInfo from the TCKEYREQ
   */
  restarter.insertErrorInAllNodes(8067);

  /* Now a 'short' TCKEYREQ - there will be no space to import the key */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        smallRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
                                          NdbDictionary::getValuePtr
                                          (record,
                                           smallRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();
#endif

  /* Finished with error insert, cleanup the error insertion
   * Error insert 8068 will free the hoarded segments
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8068));

  trans->execute(NdbTransaction::Rollback);

  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}

/* Test for correct behaviour using unique key operations
 * when an NDBD node's SegmentedSection pool is exhausted.
 */
int testSegmentedSectionIx(NDBT_Context* ctx, NDBT_Step* step){
  /*
   * Signal type       Exhausted @              How
   * -----------------------------------------------------
   * Long TCINDXREQ    Initial import           Consume + send
   * Long TCINDXREQ    Build second TCKEYREQ    Consume + send short
   *                                             w. long base key
   */
  /* We will generate :
   *   10 SS left :
   *     Long IndexReq with too long Key/AttrInfo
   *    1 SS left :
   *     Long IndexReq read with short Key + Attrinfo to long
   *       base table Key
   */
  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL_IX") != 0)
    return NDBT_OK;

  const char* indexName= "WIDE_2COL_IX$NDBT_IDX0";
  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
  const Uint32 maxIndexKeyBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytesIndex;
  /* We want to use 6 Segmented Sections, each of 60 32-bit words, including
   * a 2 byte length overhead
   * (We don't want to use 10 Segmented Sections as in some scenarios TUP
   *  uses Segmented Sections when sending results, and if we use TUP on
   *  the same node, the exhaustion will occur in TUP, which is not what
   *  we're testing)
   */
  const Uint32 mediumPrimaryKeyBytes= (6 * 60 * 4) - 2;
  char smallKey[50];
  char srcBuff[srcBuffBytes];
  char smallRowBuf[maxRowBytes];
  char bigKeyIxBuf[maxRowBytes];
  char bigAttrIxBuf[maxRowBytes];
  char bigKeyRowBuf[maxRowBytes];
  char resultSpace[maxRowBytes];

  /* Small key for hinting to same TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  /* Large value source */
  memset(srcBuff, 'B', srcBuffBytes);

  Ndb* pNdb= GETNDB(step);

  const NdbRecord* baseRecord= ctx->getTab()->getDefaultRecord();
  const NdbRecord* ixRecord= pNdb->
    getDictionary()->getIndex(indexName,
                              ctx->getTab()->getName())->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with short key and data in base table record format
   */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(baseRecord, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(baseRecord, smallRowBuf, 1, false);

  /* Big index key buffer
   * Big index key (normal row attribute) in index record format
   * Index's key is attrid 1 from the base table
   * This could get confusing !
   */

  setLongVarchar(NdbDictionary::getValuePtr(ixRecord,
                                            bigKeyIxBuf,
                                            1),
                 &srcBuff[0],
                 maxIndexKeyBytes);
  NdbDictionary::setNull(ixRecord, bigKeyIxBuf, 1, false);

  /* Big AttrInfo buffer
   * Small key and large attrinfo in base table record format */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigAttrIxBuf,
                                            0),
                 "ShortIXKey",
                 10);

  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigAttrIxBuf,
                                            1),
                 &srcBuff[0],
                 maxIndexKeyBytes);
  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 1, false);

  /* Big key row buffer
   * Medium sized key and small attrinfo (index key) in
   * base table record format
   */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigKeyRowBuf,
                                            0),
                 &srcBuff[0],
                 mediumPrimaryKeyBytes);

  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigKeyRowBuf,
                                            1),
                 "ShortIXKey",
                 10);
  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 1, false);


  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  /* Insert a row in the base table with a big PK, and
   * small data (Unique IX key).  This is used later to lookup
   * a big PK and cause overflow when reading TRANSID_AI in TC.
   */
  CHECKNOTNULL(trans->insertTuple(baseRecord,
                                  bigKeyRowBuf));

  CHECKEQUAL(0, trans->execute(NdbTransaction::Commit));

  NdbRestarter restarter;
  /* Start a transaction on a specific node */
  trans= pNdb->startTransaction(ctx->getTab(),
                                &smallKey[0],
                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8065 in this transaction, limits any
   * single append/import to 10 sections.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          baseRecord,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8065));

  /* Ok, let's try an index read with a big index key.
   * Since it's part of the same transaction, it'll go via
   * the same TC.
   */
  const NdbOperation* bigRead= trans->readTuple(ixRecord,
                                                bigKeyIxBuf,
                                                baseRecord,
                                                resultSpace);

  CHECKNOTNULL(bigRead);

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();


  /* Ok, now a long TCINDXREQ to the same TC - this
   * has slightly different abort handling since no other
   * operations exist in this new transaction.
   */
  /* Start a transaction on a specific node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Now a TCINDXREQ that overflows, but is not the last in the
   * batch, what happens to the other TCINDXREQ in the batch?
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));
  /* Another read */
  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();


  /* Next we read a tuple with a large primary key via the unique
   * index.  The index read itself should be fine, but
   * pulling in the base table PK will cause abort due to overflow
   * handling TRANSID_AI
   */
  /* Start a transaction on a specific node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  /* Activate error insert 8066 in this transaction, limits a
   * single import/append to 1 section.
   * Note that the TRANSID_AI is received by TC as a short-signal
   * train, so no single append is large, but when the first
   * segment is used and append starts on the second, it will
   * fail.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          baseRecord,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8066));
  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

  CHECKNOTNULL(bigRead= trans->readTuple(ixRecord,
                                         bigAttrIxBuf,
                                         baseRecord,
                                         resultSpace));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  // TODO Move short signal testing to testUpgrade
#if 0
  /*
   * Short TCINDXREQ   KeyInfo accumulate       Consume + send long
   *                     (TCINDXREQ + KEYINFO)
   * Short TCINDXREQ   AttrInfo accumulate      Consume + send short key
   *                                             + long AI
   *                     (TCINDXREQ + ATTRINFO)
   */
  /* Now try with a 'short' TCINDXREQ, generated using the old Api
   * with a big index key value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  const NdbDictionary::Index* index;
  CHECKNOTNULL(index= pNdb->getDictionary()->
               getIndex(indexName,
                        ctx->getTab()->getName()));

  NdbIndexOperation* bigReadOldApi;
  CHECKNOTNULL(bigReadOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigReadOldApi->readTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigReadOldApi->equal((Uint32)0,
                                     NdbDictionary::getValuePtr
                                     (ixRecord,
                                      bigKeyIxBuf,
                                      1)));

  CHECKNOTNULL(bigReadOldApi->getValue((Uint32)1));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Now try with a 'short' TCINDXREQ, generated using the old Api
   * with a big attrinfo value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  NdbIndexOperation* bigUpdateOldApi;
  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));

  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Change error insert so that next TCINDXREQ will grab
   * all but one SegmentedSection
   */
  restarter.insertErrorInAllNodes(8066);

  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
   * can be imported, but the ATTRINFO can't
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));

  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Change error insert so that there are no SectionSegments */
  restarter.insertErrorInAllNodes(8067);

  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
   * can't be imported
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));

  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

#endif

  /* Finished with error insert, cleanup the error insertion */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          baseRecord,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8068));

  trans->execute(NdbTransaction::Rollback);

  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}


int testSegmentedSectionScan(NDBT_Context* ctx, NDBT_Step* step){
  /* Test that TC handling of segmented section exhaustion is
   * correct
   * Since NDBAPI always sends long requests, that is all that
   * we test
   */
  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
    return NDBT_OK;

  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  char smallKey[50];
  char smallRowBuf[maxRowBytes];

  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  const NdbRecord* record= ctx->getTab()->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with small key and small data
   */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, smallRowBuf, 1, false);

  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8066 in this transaction, limits a
   * single import/append to 1 section.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8066));

  /* A scan will always send 2 long sections (Receiver Ids,
   * AttrInfo)
   * Let's start a scan with > 2400 bytes of
   * ATTRINFO and see what happens
   */
  NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());

  CHECKNOTNULL(scan);

  CHECKEQUAL(0, scan->readTuples());

  /* Create a particularly useless program */
  NdbInterpretedCode prog;

  for (Uint32 w=0; w < 2500; w++)
    CHECKEQUAL(0, prog.load_const_null(1));

  CHECKEQUAL(0, prog.interpret_exit_ok());
  CHECKEQUAL(0, prog.finalise());

  CHECKEQUAL(0, scan->setInterpretedCode(&prog));

  /* Api doesn't seem to wait for result of scan request */
  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

  CHECKEQUAL(0, trans->getNdbError().code);

  CHECKEQUAL(-1, scan->nextResult());

  CHECKEQUAL(217, scan->getNdbError().code);

  trans->close();

  /* Finished with error insert, cleanup the error insertion */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8068));

  CHECKEQUAL(0, trans->execute(NdbTransaction::Rollback));

  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}

int testDropSignalFragments(NDBT_Context* ctx, NDBT_Step* step){
  /* Segmented section exhaustion results in dropped signals
   * Fragmented signals split one logical signal over multiple
   * physical signals (to cope with the MAX_SIGNAL_LENGTH=32kB
   * limitation).
   * This testcase checks that when individual signals comprising
   * a fragmented signal (in this case SCANTABREQ) are dropped, the
   * system behaves correctly.
   * Correct behaviour is to behave in the same way as if the signal
   * was not fragmented, and for SCANTABREQ, to return a temporary
   * resource error.
   */
  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* SEND > ((2 * MAX_SEND_MESSAGE_BYTESIZE) + SOME EXTRA)
   * This way we get at least 3 fragments
   * However, as this is generally > 64kB, it's too much AttrInfo for
   * a ScanTabReq, so the 'success' case returns error 874
   */
  const Uint32 PROG_WORDS= 16500;

  struct SubCase
  {
    Uint32 errorInsertCode;
    int expectedRc;
  };
  const Uint32 numSubCases= 5;
  const SubCase cases[numSubCases]=
  /* Error insert   Scanrc */
    {{          0,     874},  // Normal, success which gives too much AI error
     {       8074,     217},  // Drop first fragment -> error 217
     {       8075,     217},  // Drop middle fragment(s) -> error 217
     {       8076,     217},  // Drop last fragment -> error 217
     {       8077,     217}}; // Drop all fragments -> error 217
  const Uint32 numIterations= 50;

  Uint32 buff[ PROG_WORDS + 10 ]; // 10 extra for final 'return' etc.

  for (Uint32 iteration=0; iteration < (numIterations * numSubCases); iteration++)
  {
    /* Start a transaction */
    NdbTransaction* trans= pNdb->startTransaction();
    CHECKNOTNULL(trans);

    SubCase subcase= cases[iteration % numSubCases];

    Uint32 errorInsertVal= subcase.errorInsertCode;
    // printf("Inserting error : %u\n", errorInsertVal);
    /* We insert the error twice, to bias races between
     * error-insert propagation and the succeeding scan
     * in favour of error insert winning!
     * This problem needs a more general fix
     */
    CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));
    CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));

    NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());

    CHECKNOTNULL(scan);

    CHECKEQUAL(0, scan->readTuples());

    /* Create a large program, to give a large SCANTABREQ */
    NdbInterpretedCode prog(ctx->getTab(), buff, PROG_WORDS + 10);

    for (Uint32 w=0; w < PROG_WORDS; w++)
      CHECKEQUAL(0, prog.load_const_null(1));

    CHECKEQUAL(0, prog.interpret_exit_ok());
    CHECKEQUAL(0, prog.finalise());

    CHECKEQUAL(0, scan->setInterpretedCode(&prog));

    /* Api doesn't seem to wait for result of scan request */
    CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

    CHECKEQUAL(0, trans->getNdbError().code);

    CHECKEQUAL(-1, scan->nextResult());

    int expectedResult= subcase.expectedRc;
    CHECKEQUAL(expectedResult, scan->getNdbError().code);

    scan->close();

    trans->close();
  }

  restarter.insertErrorInAllNodes(0);

  return NDBT_OK;
}

int create100Tables(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  const NdbDictionary::Table* pTab= ctx->getTab();

  /* Run as a 'T1' testcase - do nothing for other tables */
  if (strcmp(pTab->getName(), "T1") != 0)
    return NDBT_OK;

  for (Uint32 t=0; t < 100; t++)
  {
    char tabnameBuff[10];
    BaseString::snprintf(tabnameBuff, sizeof(tabnameBuff), "TAB%u", t);

    NdbDictionary::Table tab;
    tab.setName(tabnameBuff);
    NdbDictionary::Column pk;
    pk.setName("PK");
    pk.setType(NdbDictionary::Column::Varchar);
    pk.setLength(20);
    pk.setNullable(false);
    pk.setPrimaryKey(true);
    tab.addColumn(pk);

    pNdb->getDictionary()->dropTable(tab.getName());
    if(pNdb->getDictionary()->createTable(tab) != 0)
    {
      ndbout << "Create table failed with error : "
             << pNdb->getDictionary()->getNdbError().code
             << " "
             << pNdb->getDictionary()->getNdbError().message
             << endl;
      return NDBT_FAILED;
    }

    ndbout << "Created table " << tabnameBuff << endl;
  }

  return NDBT_OK;
}

int drop100Tables(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  const NdbDictionary::Table* pTab= ctx->getTab();

  /* Run as a 'T1' testcase - do nothing for other tables */
  if (strcmp(pTab->getName(), "T1") != 0)
    return NDBT_OK;

  for (Uint32 t=0; t < 100; t++)
  {
    char tabnameBuff[10];
    BaseString::snprintf(tabnameBuff, sizeof(tabnameBuff), "TAB%u", t);

    if (pNdb->getDictionary()->dropTable(tabnameBuff) != 0)
    {
      ndbout << "Drop table failed with error : "
             << pNdb->getDictionary()->getNdbError().code
             << " "
             << pNdb->getDictionary()->getNdbError().message
             << endl;
    }
    else
    {
      ndbout << "Dropped table " << tabnameBuff << endl;
    }
  }

  return NDBT_OK;
}

int dropTable(NDBT_Context* ctx, NDBT_Step* step, Uint32 num)
{
  Ndb* pNdb = GETNDB(step);
  const NdbDictionary::Table* pTab= ctx->getTab();

  /* Run as a 'T1' testcase - do nothing for other tables */
  if (strcmp(pTab->getName(), "T1") != 0)
    return NDBT_OK;

  char tabnameBuff[10];
  BaseString::snprintf(tabnameBuff, sizeof(tabnameBuff), "TAB%u", num);

  if (pNdb->getDictionary()->dropTable(tabnameBuff) != 0)
  {
    ndbout << "Drop table failed with error : "
           << pNdb->getDictionary()->getNdbError().code
           << " "
           << pNdb->getDictionary()->getNdbError().message
           << endl;
  }
  else
  {
    ndbout << "Dropped table " << tabnameBuff << endl;
  }

  return NDBT_OK;
}


enum Scenarios
{
//  NORMAL,  // Commented to save some time.
  DROP_TABLE,
  RESTART_MASTER,
  RESTART_SLAVE,
  NUM_SCENARIOS
};


enum Tasks
{
  WAIT = 0,
  DROP_TABLE_REQ = 1,
  MASTER_RESTART_REQ = 2,
  SLAVE_RESTART_REQ = 3
};
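
/* The SlowDihFileWrites testcase uses two steps that communicate via test
 * properties.  A sketch of the request/acknowledge handshake, derived from
 * testWorker() and testSlowDihFileWrites() below :
 *
 *   driver : setProperty("DIHWritesRequestType", <Tasks value>);
 *            setProperty("DIHWritesRequest", 1);      // issue request
 *   worker : getPropertyWait("DIHWritesRequest", 1);  // wake up
 *            ... perform drop table / node restart ...
 *            setProperty("DIHWritesRequest", 2);      // acknowledge
 *   driver : getPropertyWait("DIHWritesRequest", 2);  // wait for completion
 */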

int testWorker(NDBT_Context* ctx, NDBT_Step* step)
{
  /* Run as a 'T1' testcase - do nothing for other tables */
  if (strcmp(ctx->getTab()->getName(), "T1") != 0)
    return NDBT_OK;

  /* Worker step to run in a separate thread for
   * blocking activities
   * Generally the blocking of the DIH table definition flush
   * blocks the completion of the drop table/node restarts,
   * so this must be done in a separate thread to avoid
   * deadlocks.
   */

  while (!ctx->isTestStopped())
  {
    ndbout_c("Worker : waiting for request...");
    ctx->getPropertyWait("DIHWritesRequest", 1);

    if (!ctx->isTestStopped())
    {
      Uint32 req = ctx->getProperty("DIHWritesRequestType", (Uint32)0);

      switch ((Tasks) req)
      {
      case DROP_TABLE_REQ:
      {
        /* Drop table */
        ndbout_c("Worker : dropping table");
        if (dropTable(ctx, step, 2) != NDBT_OK)
        {
          return NDBT_FAILED;
        }
        ndbout_c("Worker : table dropped.");
        break;
      }
      case MASTER_RESTART_REQ:
      {
        ndbout_c("Worker : restarting Master");

        NdbRestarter restarter;
        int master_nodeid = restarter.getMasterNodeId();
        ndbout_c("Worker : Restarting Master (%d)...", master_nodeid);
        if (restarter.restartOneDbNode2(master_nodeid,
                                        NdbRestarter::NRRF_NOSTART |
                                        NdbRestarter::NRRF_FORCE |
                                        NdbRestarter::NRRF_ABORT) ||
            restarter.waitNodesNoStart(&master_nodeid, 1) ||
            restarter.startAll())
        {
          ndbout_c("Worker : Error restarting Master.");
          return NDBT_FAILED;
        }
        ndbout_c("Worker : Waiting for master to recover...");
        if (restarter.waitNodesStarted(&master_nodeid, 1))
        {
          ndbout_c("Worker : Error waiting for Master restart");
          return NDBT_FAILED;
        }
        ndbout_c("Worker : Master recovered.");
        break;
      }
      case SLAVE_RESTART_REQ:
      {
        NdbRestarter restarter;
        int slave_nodeid = restarter.getRandomNotMasterNodeId(rand());
        ndbout_c("Worker : Restarting non-master (%d)...", slave_nodeid);
        if (restarter.restartOneDbNode2(slave_nodeid,
                                        NdbRestarter::NRRF_NOSTART |
                                        NdbRestarter::NRRF_FORCE |
                                        NdbRestarter::NRRF_ABORT) ||
            restarter.waitNodesNoStart(&slave_nodeid, 1) ||
            restarter.startAll())
        {
          ndbout_c("Worker : Error restarting Slave.");
          return NDBT_FAILED;
        }
        ndbout_c("Worker : Waiting for slave to recover...");
        if (restarter.waitNodesStarted(&slave_nodeid, 1))
        {
          ndbout_c("Worker : Error waiting for Slave restart");
          return NDBT_FAILED;
        }
        ndbout_c("Worker : Slave recovered.");
        break;
      }
      default:
      {
        break;
      }
      }
    }
    ctx->setProperty("DIHWritesRequestType", (Uint32) 0);
    ctx->setProperty("DIHWritesRequest", (Uint32) 2);
  }

  ndbout_c("Worker, done.");
  return NDBT_OK;
}

int testSlowDihFileWrites(NDBT_Context* ctx, NDBT_Step* step)
{
  /* Testcase checks behaviour with slow flushing of DIH table definitions
   * This caused problems in the past by exhausting the DIH page pool
   * Now there's a concurrent operations limit.
   * Check that it behaves with many queued ops, parallel drop/node restarts
   */

  /* Run as a 'T1' testcase - do nothing for other tables */
  if (strcmp(ctx->getTab()->getName(), "T1") != 0)
    return NDBT_OK;

  /* 1. Activate slow write error insert
   * 2. Trigger LCP
   * 3. Wait some time, periodically producing info on
   *    the internal state
   * 4. Perform some parallel action (drop table/node restarts)
   * 5. Wait some time, periodically producing info on
   *    the internal state
   * 6. Clear the error insert
   * 7. Wait a little longer
   * 8. Done.
   */
  NdbRestarter restarter;

  for (Uint32 scenario = 0;  scenario < NUM_SCENARIOS; scenario++)
  {
    ndbout_c("Inserting error 7235");
    restarter.insertErrorInAllNodes(7235);

    ndbout_c("Triggering LCP");
    int dumpArg = 7099;
    restarter.dumpStateAllNodes(&dumpArg, 1);

    const Uint32 periodSeconds = 10;
    Uint32 waitPeriods = 6;
    dumpArg = 7032;

    for (Uint32 p=0; p<waitPeriods; p++)
    {
      if (p == 3)
      {
        switch ((Scenarios) scenario)
        {
        case DROP_TABLE:
        {
          /* Drop one of the early-created tables */
          ndbout_c("Requesting DROP TABLE");
          ctx->setProperty("DIHWritesRequestType", (Uint32) DROP_TABLE_REQ);
          ctx->setProperty("DIHWritesRequest", (Uint32) 1);
          break;
        }
        case RESTART_MASTER:
        {
          ndbout_c("Requesting Master restart");
          ctx->setProperty("DIHWritesRequestType", (Uint32) MASTER_RESTART_REQ);
          ctx->setProperty("DIHWritesRequest", (Uint32) 1);

          break;
        }
        case RESTART_SLAVE:
        {
          ndbout_c("Requesting Slave restart");
          ctx->setProperty("DIHWritesRequestType", (Uint32) SLAVE_RESTART_REQ);
          ctx->setProperty("DIHWritesRequest", (Uint32) 1);

          break;
        }
        default:
          break;
        }
      }

      ndbout_c("Dumping DIH page info to ndbd stdout");
      restarter.dumpStateAllNodes(&dumpArg, 1);
      NdbSleep_MilliSleep(periodSeconds * 1000);
    }

    ndbout_c("Clearing error insert...");
    restarter.insertErrorInAllNodes(0);

    waitPeriods = 2;
    for (Uint32 p=0; p<waitPeriods; p++)
    {
      ndbout_c("Dumping DIH page info to ndbd stdout");
      restarter.dumpStateAllNodes(&dumpArg, 1);
      NdbSleep_MilliSleep(periodSeconds * 1000);
    }

    ndbout_c("Waiting for worker to finish task...");
    ctx->getPropertyWait("DIHWritesRequest", 2);

    if (ctx->isTestStopped())
      return NDBT_OK;

    ndbout_c("Done.");
  }

  /* Finish up */
  ctx->stopTest();

  return NDBT_OK;
}

int testNdbfsBulkOpen(NDBT_Context* ctx, NDBT_Step* step)
{
  NdbRestarter restarter;

  g_err << "Getting all nodes to create + open a number of files in parallel"
        << endl;
  int dumpArg = 667;
  CHECK(restarter.dumpStateAllNodes(&dumpArg, 1) == 0);

  ndbout_c("Giving time for the open to complete");
  NdbSleep_MilliSleep(30*1000);

  ndbout_c("Crash DB nodes that have not completed opening files");
  dumpArg = 668;
  CHECK(restarter.dumpStateAllNodes(&dumpArg, 1) == 0);

  g_err << "Checking whether any data node crashed" << endl;
  uint num_nodes = restarter.getNumDbNodes();
  int *dead_nodes = new int[num_nodes];
  for (uint i = 0; i < num_nodes; ++i)
  {
    dead_nodes[i] = 0;
  }
  int dead_node = restarter.checkClusterAlive(dead_nodes, num_nodes);
  delete[] dead_nodes; /* Only needed for the aliveness check above */
  if (dead_node != 0)
  {
    g_err << "Data node " << dead_node << " crashed" << endl;
  }
  CHECK(dead_node == 0);

  g_err << "Restarting nodes to get rid of error insertion effects"
        << endl;
  // restartAll(initial=true) doesn't remove CMVMI either
  CHECK(restarter.restartAll() == 0);
  const int timeout = 300;
  CHECK(restarter.waitClusterStarted(timeout) == 0);
  Ndb* pNdb = GETNDB(step);
  CHECK(pNdb->waitUntilReady(timeout) == 0);
  CHK_NDB_READY(pNdb);

  return NDBT_OK;
}


NDBT_TESTSUITE(testLimits);

TESTCASE("ExhaustSegmentedSectionPk",
         "Test behaviour at Segmented Section exhaustion for PK"){
  INITIALIZER(testSegmentedSectionPk);
}

TESTCASE("ExhaustSegmentedSectionIX",
         "Test behaviour at Segmented Section exhaustion for Unique index"){
  INITIALIZER(testSegmentedSectionIx);
}
TESTCASE("ExhaustSegmentedSectionScan",
         "Test behaviour at Segmented Section exhaustion for Scan"){
  INITIALIZER(testSegmentedSectionScan);
}

TESTCASE("DropSignalFragments",
         "Test behaviour of Segmented Section exhaustion with fragmented signals"){
  INITIALIZER(testDropSignalFragments);
}

TESTCASE("SlowDihFileWrites",
         "Test behaviour of slow Dih table file writes")
{
  INITIALIZER(create100Tables);
  STEP(testWorker);
  STEP(testSlowDihFileWrites);
  FINALIZER(drop100Tables);
}
TESTCASE("NdbfsBulkOpen",
         "Test behaviour of NdbFs bulk file open")
{
  INITIALIZER(testNdbfsBulkOpen);
}

NDBT_TESTSUITE_END(testLimits)

int main(int argc, const char** argv){
  ndb_init();
  NDBT_TESTSUITE_INSTANCE(testLimits);
  return testLimits.execute(argc, argv);
}