1 /* Copyright (C) 2008 MySQL AB, 2008, 2009 Sun Microsystems, Inc.
2 All rights reserved. Use is subject to license terms.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License, version 2.0,
6 as published by the Free Software Foundation.
7
8 This program is also distributed with certain software (including
9 but not limited to OpenSSL) that is licensed under separate terms,
10 as designated in a particular file or component or in included license
11 documentation. The authors of MySQL hereby grant you an additional
12 permission to link the program and your derivative works with the
13 separately licensed software that they have included with MySQL.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License, version 2.0, for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
23
24 #include <NDBT.hpp>
25 #include <NDBT_Test.hpp>
26 #include <NdbRestarter.hpp>
27
/* Fail-fast check used by the testcases below.
 * If pointer expression 'p' is NULL : log the failing source line, dump
 * the NdbError from the current transaction, close it, and abort the
 * testcase with NDBT_FAILED.
 * NOTE : expands to a plain if-statement (deliberately NOT wrapped in
 * do { } while(0) - some call sites in this file omit the trailing ';').
 * Requires an NdbTransaction* named 'trans' to be in scope at the call
 * site, and must only be used where an early 'return NDBT_FAILED' is
 * valid.
 */
#define CHECKNOTNULL(p) if ((p) == NULL) { \
    ndbout << "Error at line " << __LINE__ << endl; \
    ERR(trans->getNdbError()); \
    trans->close(); \
    return NDBT_FAILED; }
33
/* Fail-fast check used by the testcases below.
 * If expression 'e' does not evaluate to the expected value 'v' : log
 * the failing source line and expected value, dump the NdbError from
 * the current transaction, close it, and abort the testcase with
 * NDBT_FAILED.
 * Same caveats as CHECKNOTNULL : bare if-statement, requires 'trans'
 * in scope, and usable only where 'return NDBT_FAILED' is valid.
 */
#define CHECKEQUAL(v, e) if ((e) != (v)) { \
    ndbout << "Error at line " << __LINE__ << \
      " expected " << v << endl; \
    ERR(trans->getNdbError()); \
    trans->close(); \
    return NDBT_FAILED; }
40
41
42 /* Setup memory as a long Varchar with 2 bytes of
43 * length information
44 */
setLongVarchar(char * where,const char * what,Uint32 sz)45 Uint32 setLongVarchar(char* where, const char* what, Uint32 sz)
46 {
47 where[0]=sz & 0xff;
48 where[1]=(sz >> 8) & 0xff;
49 memcpy(&where[2], what, sz);
50 return (sz + 2);
51 }
52
53
54 /* Activate the given error insert in TC block
55 * This is used for error insertion where a TCKEYREQ
56 * is required to activate the error
57 */
activateErrorInsert(NdbTransaction * trans,const NdbRecord * record,const NdbDictionary::Table * tab,const char * buf,NdbRestarter * restarter,Uint32 val)58 int activateErrorInsert(NdbTransaction* trans,
59 const NdbRecord* record,
60 const NdbDictionary::Table* tab,
61 const char* buf,
62 NdbRestarter* restarter,
63 Uint32 val)
64 {
65 /* We insert the error twice to avoid what appear to be
66 * races between the error insert and the subsequent
67 * tests
68 * Alternatively we could sleep here.
69 */
70 if (restarter->insertErrorInAllNodes(val) != 0){
71 g_err << "error insert 1 (" << val << ") failed" << endl;
72 return NDBT_FAILED;
73 }
74 if (restarter->insertErrorInAllNodes(val) != 0){
75 g_err << "error insert 2 (" << val << ") failed" << endl;
76 return NDBT_FAILED;
77 }
78
79 NdbOperation* insert= trans->getNdbOperation(tab);
80
81 CHECKNOTNULL(insert);
82
83 CHECKEQUAL(0, insert->insertTuple());
84
85 CHECKEQUAL(0, insert->equal((Uint32) 0,
86 NdbDictionary::getValuePtr
87 (record,
88 buf,
89 0)));
90 CHECKEQUAL(0, insert->setValue(1,
91 NdbDictionary::getValuePtr
92 (record,
93 buf,
94 1)));
95
96 CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
97
98 CHECKEQUAL(0, trans->getNdbError().code);
99
100 return NDBT_OK;
101 }
102
/* Test for correct behaviour using primary key operations
 * when an NDBD node's SegmentedSection pool is exhausted.
 * Strategy : pin all operations to one TC (via a transaction hint on a
 * fixed small key), arm TC error inserts that artificially limit
 * SegmentedSection allocation, then drive oversized TCKEYREQ variants
 * at that TC and check each fails with error 218 (ZGET_DATABUF_ERR).
 */
int testSegmentedSectionPk(NDBT_Context* ctx, NDBT_Step* step){
  /*
   * Signal type      Exhausted @                    How
   * -----------------------------------------------------
   * Long TCKEYREQ    Initial import                 Consume + send
   * Long TCKEYREQ    Initial import, not first
   *                    TCKEYREQ in batch            Consume + send
   * Long TCKEYREQ    Initial import, not last
   *                    TCKEYREQ in batch            Consume + send
   * No testing of short TCKEYREQ variants as they cannot be
   * generated in mysql-5.1-telco-6.4+
   * TODO : Add short variant testing to testUpgrade.
   */

  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
    return NDBT_OK;

  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
  const Uint32 maxAttrBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytes;
  char smallKey[50];
  char srcBuff[srcBuffBytes];
  char smallRowBuf[maxRowBytes];
  char bigKeyRowBuf[maxRowBytes];
  char bigAttrRowBuf[maxRowBytes];

  /* Small key for hinting to same TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  /* Large value source */
  memset(srcBuff, 'B', srcBuffBytes);

  const NdbRecord* record= ctx->getTab()->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with small key and small data
   */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, smallRowBuf, 1, false);

  /* Big key buffer with big key and small data*/
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigKeyRowBuf,
                                            0),
                 &srcBuff[0],
                 srcBuffBytes);
  NdbDictionary::setNull(record, bigKeyRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigKeyRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, bigKeyRowBuf, 1, false);

  /* Big AttrInfo buffer with small key and big data */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigAttrRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, bigAttrRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigAttrRowBuf,
                                            1),
                 &srcBuff[0],
                 maxAttrBytes);
  NdbDictionary::setNull(record, bigAttrRowBuf, 1, false);

  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8065 in this transaction, limits
   * any single import/append to 1 section
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8065));

  /* Ok, let's try an insert with a key bigger than 1 section.
   * Since it's part of the same transaction, it'll go via
   * the same TC.
   */
  const NdbOperation* bigInsert = trans->insertTuple(record, bigKeyRowBuf);

  CHECKNOTNULL(bigInsert);

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Ok, now a long TCKEYREQ to the same TC - this
   * has slightly different abort handling since no other
   * operations exist in this new transaction.
   * We also change it so that import overflow occurs
   * on the AttrInfo section
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));


  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));

  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Ok, now a long TCKEYREQ where we run out of SegmentedSections
   * on the first TCKEYREQ, but there are other TCKEYREQs following
   * in the same batch.  Check that abort handling is correct
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  /* First op in batch, will cause overflow */
  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));

  /* Second op in batch, what happens to it?
   * (Only its presence in the batch matters; its individual result is
   *  not examined beyond the transaction-level error check below)
   */
  const NdbOperation* secondOp;
  CHECKNOTNULL(secondOp = trans->insertTuple(record, bigAttrRowBuf));


  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Now try with a 'short' TCKEYREQ, generated using the old Api
   * with a big key value
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  NdbOperation* bigInsertOldApi;
  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        bigKeyRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
                                          NdbDictionary::getValuePtr
                                          (record,
                                           bigKeyRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Now try with a 'short' TCKEYREQ, generated using the old Api
   * with a big data value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        bigAttrRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
                                          NdbDictionary::getValuePtr
                                          (record,
                                           bigAttrRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  // TODO : Add code to testUpgrade
#if 0
  /*
   * Short TCKEYREQ    KeyInfo accumulate     Consume + send long
   *                     (TCKEYREQ + KEYINFO)
   * Short TCKEYREQ    AttrInfo accumulate    Consume + send short key
   *                                          + long AI
   *                     (TCKEYREQ + ATTRINFO)
   */
  /* Change error insert so that next TCKEYREQ will grab
   * all but one SegmentedSection so that we can then test SegmentedSection
   * exhaustion when importing the Key/AttrInfo words from the
   * TCKEYREQ signal itself.
   */
  restarter.insertErrorInAllNodes(8066);


  /* Now a 'short' TCKEYREQ, there will be space to import the
   * short key, but not the AttrInfo
   */
  /* Start transaction on same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        smallRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1, NdbDictionary::getValuePtr
                                          (record,
                                           smallRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Change error insert so that there are no SectionSegments
   * This will cause failure when attempting to import the
   * KeyInfo from the TCKEYREQ
   */
  restarter.insertErrorInAllNodes(8067);

  /* Now a 'short' TCKEYREQ - there will be no space to import the key */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (record,
                                        smallRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
                                          NdbDictionary::getValuePtr
                                          (record,
                                           smallRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();
#endif

  /* Finished with error insert, cleanup the error insertion
   * Error insert 8068 will free the hoarded segments
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8068));

  /* Best-effort rollback; the subsequent error-code check validates it */
  trans->execute(NdbTransaction::Rollback);

  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}
426
427 /* Test for correct behaviour using unique key operations
428 * when an NDBD node's SegmentedSection pool is exhausted.
429 */
testSegmentedSectionIx(NDBT_Context * ctx,NDBT_Step * step)430 int testSegmentedSectionIx(NDBT_Context* ctx, NDBT_Step* step){
431 /*
432 * Signal type Exhausted @ How
433 * -----------------------------------------------------
434 * Long TCINDXREQ Initial import Consume + send
435 * Long TCINDXREQ Build second TCKEYREQ Consume + send short
436 * w. long base key
437 */
438 /* We will generate :
439 * 10 SS left :
440 * Long IndexReq with too long Key/AttrInfo
441 * 1 SS left :
442 * Long IndexReq read with short Key + Attrinfo to long
443 * base table Key
444 */
445 /* We just run on one table */
446 if (strcmp(ctx->getTab()->getName(), "WIDE_2COL_IX") != 0)
447 return NDBT_OK;
448
449 const char* indexName= "WIDE_2COL_IX$NDBT_IDX0";
450 const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
451 const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
452 const Uint32 maxIndexKeyBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytesIndex;
453 /* We want to use 6 Segmented Sections, each of 60 32-bit words, including
454 * a 2 byte length overhead
455 * (We don't want to use 10 Segmented Sections as in some scenarios TUP
456 * uses Segmented Sections when sending results, and if we use TUP on
457 * the same node, the exhaustion will occur in TUP, which is not what
458 * we're testing)
459 */
460 const Uint32 mediumPrimaryKeyBytes= (6* 60 * 4) - 2;
461 char smallKey[50];
462 char srcBuff[srcBuffBytes];
463 char smallRowBuf[maxRowBytes];
464 char bigKeyIxBuf[maxRowBytes];
465 char bigAttrIxBuf[maxRowBytes];
466 char bigKeyRowBuf[maxRowBytes];
467 char resultSpace[maxRowBytes];
468
469 /* Small key for hinting to same TC */
470 Uint32 smallKeySize= setLongVarchar(&smallKey[0],
471 "ShortKey",
472 8);
473
474 /* Large value source */
475 memset(srcBuff, 'B', srcBuffBytes);
476
477 Ndb* pNdb= GETNDB(step);
478
479 const NdbRecord* baseRecord= ctx->getTab()->getDefaultRecord();
480 const NdbRecord* ixRecord= pNdb->
481 getDictionary()->getIndex(indexName,
482 ctx->getTab()->getName())->getDefaultRecord();
483
484 /* Setup buffers
485 * Small row buffer with short key and data in base table record format
486 */
487 setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
488 smallRowBuf,
489 0),
490 "ShortKey",
491 8);
492 NdbDictionary::setNull(baseRecord, smallRowBuf, 0, false);
493
494 setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
495 smallRowBuf,
496 1),
497 "ShortData",
498 9);
499 NdbDictionary::setNull(baseRecord, smallRowBuf, 1, false);
500
501 /* Big index key buffer
502 * Big index key (normal row attribute) in index record format
503 * Index's key is attrid 1 from the base table
504 * This could get confusing !
505 */
506
507 setLongVarchar(NdbDictionary::getValuePtr(ixRecord,
508 bigKeyIxBuf,
509 1),
510 &srcBuff[0],
511 maxIndexKeyBytes);
512 NdbDictionary::setNull(ixRecord, bigKeyIxBuf, 1, false);
513
514 /* Big AttrInfo buffer
515 * Small key and large attrinfo in base table record format */
516 setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
517 bigAttrIxBuf,
518 0),
519 "ShortIXKey",
520 10);
521
522 NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 0, false);
523
524 setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
525 bigAttrIxBuf,
526 1),
527 &srcBuff[0],
528 maxIndexKeyBytes);
529 NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 1, false);
530
531 /* Big key row buffer
532 * Medium sized key and small attrinfo (index key) in
533 * base table record format
534 */
535 setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
536 bigKeyRowBuf,
537 0),
538 &srcBuff[0],
539 mediumPrimaryKeyBytes);
540
541 NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 0, false);
542
543 setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
544 bigKeyRowBuf,
545 1),
546 "ShortIXKey",
547 10);
548 NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 1, false);
549
550
551 /* Start a transaction on a specific node */
552 NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
553 &smallKey[0],
554 smallKeySize);
555 /* Insert a row in the base table with a big PK, and
556 * small data (Unique IX key). This is used later to lookup
557 * a big PK and cause overflow when reading TRANSID_AI in TC.
558 */
559 CHECKNOTNULL(trans->insertTuple(baseRecord,
560 bigKeyRowBuf));
561
562 CHECKEQUAL(0, trans->execute(NdbTransaction::Commit));
563
564 NdbRestarter restarter;
565 /* Start a transaction on a specific node */
566 trans= pNdb->startTransaction(ctx->getTab(),
567 &smallKey[0],
568 smallKeySize);
569 CHECKNOTNULL(trans);
570
571 /* Activate error insert 8065 in this transaction, limits any
572 * single append/import to 10 sections.
573 */
574 CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
575 baseRecord,
576 ctx->getTab(),
577 smallRowBuf,
578 &restarter,
579 8065));
580
581 /* Ok, let's try an index read with a big index key.
582 * Since it's part of the same transaction, it'll go via
583 * the same TC.
584 */
585 const NdbOperation* bigRead= trans->readTuple(ixRecord,
586 bigKeyIxBuf,
587 baseRecord,
588 resultSpace);
589
590 CHECKNOTNULL(bigRead);
591
592 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
593
594 /* ZGET_DATABUF_ERR expected */
595 CHECKEQUAL(218, trans->getNdbError().code)
596
597 trans->close();
598
599
600 /* Ok, now a long TCINDXREQ to the same TC - this
601 * has slightly different abort handling since no other
602 * operations exist in this new transaction.
603 */
604 /* Start a transaction on a specific node */
605 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
606 &smallKey[0],
607 smallKeySize));
608
609 CHECKNOTNULL(trans->readTuple(ixRecord,
610 bigKeyIxBuf,
611 baseRecord,
612 resultSpace));
613
614 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
615
616 /* ZGET_DATABUF_ERR expected */
617 CHECKEQUAL(218, trans->getNdbError().code);
618
619 trans->close();
620
621 /* Now a TCINDXREQ that overflows, but is not the last in the
622 * batch, what happens to the other TCINDXREQ in the batch?
623 */
624 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
625 &smallKey[0],
626 smallKeySize));
627
628 CHECKNOTNULL(trans->readTuple(ixRecord,
629 bigKeyIxBuf,
630 baseRecord,
631 resultSpace));
632 /* Another read */
633 CHECKNOTNULL(trans->readTuple(ixRecord,
634 bigKeyIxBuf,
635 baseRecord,
636 resultSpace));
637
638 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
639
640 /* ZGET_DATABUF_ERR expected */
641 CHECKEQUAL(218, trans->getNdbError().code);
642
643 trans->close();
644
645
646 /* Next we read a tuple with a large primary key via the unique
647 * index. The index read itself should be fine, but
648 * pulling in the base table PK will cause abort due to overflow
649 * handling TRANSID_AI
650 */
651 /* Start a transaction on a specific node */
652 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
653 &smallKey[0],
654 smallKeySize));
655
656 /* Activate error insert 8066 in this transaction, limits a
657 * single import/append to 1 section.
658 * Note that the TRANSID_AI is received by TC as a short-signal
659 * train, so no single append is large, but when the first
660 * segment is used and append starts on the second, it will
661 * fail.
662 */
663 CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
664 baseRecord,
665 ctx->getTab(),
666 smallRowBuf,
667 &restarter,
668 8066));
669 CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
670
671 CHECKNOTNULL(bigRead= trans->readTuple(ixRecord,
672 bigAttrIxBuf,
673 baseRecord,
674 resultSpace));
675
676 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
677
678 /* ZGET_DATABUF_ERR expected */
679 CHECKEQUAL(218, trans->getNdbError().code)
680
681 trans->close();
682
683 // TODO Move short signal testing to testUpgrade
684 #if 0
685 /*
686 * Short TCINDXREQ KeyInfo accumulate Consume + send long
687 * (TCINDXREQ + KEYINFO)
688 * Short TCINDXREQ AttrInfo accumulate Consume + send short key
689 * + long AI
690 * (TCINDXREQ + ATTRINFO)
691 */
692 /* Now try with a 'short' TCINDXREQ, generated using the old Api
693 * with a big index key value
694 */
695 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
696 &smallKey[0],
697 smallKeySize));
698
699 const NdbDictionary::Index* index;
700 CHECKNOTNULL(index= pNdb->getDictionary()->
701 getIndex(indexName,
702 ctx->getTab()->getName()));
703
704 NdbIndexOperation* bigReadOldApi;
705 CHECKNOTNULL(bigReadOldApi= trans->getNdbIndexOperation(index));
706
707 CHECKEQUAL(0, bigReadOldApi->readTuple());
708 /* We use the attribute id of the index, not the base table here */
709 CHECKEQUAL(0, bigReadOldApi->equal((Uint32)0,
710 NdbDictionary::getValuePtr
711 (ixRecord,
712 bigKeyIxBuf,
713 1)));
714
715 CHECKNOTNULL(bigReadOldApi->getValue((Uint32)1));
716
717 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
718
719 /* ZGET_DATABUF_ERR expected */
720 CHECKEQUAL(218, trans->getNdbError().code)
721
722 trans->close();
723
724 /* Now try with a 'short' TCINDXREQ, generated using the old Api
725 * with a big attrinfo value
726 */
727 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
728 &smallKey[0],
729 smallKeySize));
730
731 NdbIndexOperation* bigUpdateOldApi;
732 CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
733
734 CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
735 /* We use the attribute id of the index, not the base table here */
736 CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
737 NdbDictionary::getValuePtr
738 (baseRecord,
739 smallRowBuf,
740 1)));
741
742 CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
743 NdbDictionary::getValuePtr
744 (baseRecord,
745 bigAttrIxBuf,
746 1)));
747
748 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
749
750 /* ZGET_DATABUF_ERR expected */
751 CHECKEQUAL(218, trans->getNdbError().code)
752
753 trans->close();
754
755 /* Change error insert so that next TCINDXREQ will grab
756 * all but one SegmentedSection
757 */
758 restarter.insertErrorInAllNodes(8066);
759
760 /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
761 * can be imported, but the ATTRINFO can't
762 */
763 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
764 &smallKey[0],
765 smallKeySize));
766
767 CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
768
769 CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
770 /* We use the attribute id of the index, not the base table here */
771 CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
772 NdbDictionary::getValuePtr
773 (baseRecord,
774 smallRowBuf,
775 1)));
776
777 CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
778 NdbDictionary::getValuePtr
779 (baseRecord,
780 bigAttrIxBuf,
781 1)));
782
783 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
784
785 /* ZGET_DATABUF_ERR expected */
786 CHECKEQUAL(218, trans->getNdbError().code)
787
788 trans->close();
789
790 /* Change error insert so that there are no SectionSegments */
791 restarter.insertErrorInAllNodes(8067);
792
793 /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
794 * can't be imported
795 */
796 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
797 &smallKey[0],
798 smallKeySize));
799
800 CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
801
802 CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
803 /* We use the attribute id of the index, not the base table here */
804 CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
805 NdbDictionary::getValuePtr
806 (baseRecord,
807 smallRowBuf,
808 1)));
809
810 CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
811 NdbDictionary::getValuePtr
812 (baseRecord,
813 bigAttrIxBuf,
814 1)));
815
816 CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
817
818 /* ZGET_DATABUF_ERR expected */
819 CHECKEQUAL(218, trans->getNdbError().code)
820
821 trans->close();
822
823 #endif
824
825 /* Finished with error insert, cleanup the error insertion */
826 CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
827 &smallKey[0],
828 smallKeySize));
829
830 CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
831 baseRecord,
832 ctx->getTab(),
833 smallRowBuf,
834 &restarter,
835 8068));
836
837 trans->execute(NdbTransaction::Rollback);
838
839 CHECKEQUAL(0, trans->getNdbError().code);
840
841 trans->close();
842
843 return NDBT_OK;
844 }
845
846
/* Test for correct behaviour of scans when an NDBD node's
 * SegmentedSection pool is exhausted.
 * Uses a transaction hint to pin the scan to one TC, arms error insert
 * 8066 (single import/append limited to 1 section), then starts a scan
 * carrying an oversized interpreted program and expects error 217.
 */
int testSegmentedSectionScan(NDBT_Context* ctx, NDBT_Step* step){
  /* Test that TC handling of segmented section exhaustion is
   * correct
   * Since NDBAPI always send long requests, that is all that
   * we test
   */
  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
    return NDBT_OK;

  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  char smallKey[50];
  char smallRowBuf[maxRowBytes];

  /* Small key for hinting to same TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  const NdbRecord* record= ctx->getTab()->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with small key and small data
   */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, smallRowBuf, 1, false);

  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8066 in this transaction, limits a
   * single import/append to 1 section.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8066));

  /* A scan will always send 2 long sections (Receiver Ids,
   * AttrInfo)
   * Let's start a scan with > 2400 bytes of
   * ATTRINFO and see what happens
   */
  NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());

  CHECKNOTNULL(scan);

  CHECKEQUAL(0, scan->readTuples());

  /* Create a particularly useless program
   * (2500 no-op words just to inflate the AttrInfo section size)
   */
  NdbInterpretedCode prog;

  for (Uint32 w=0; w < 2500; w++)
    CHECKEQUAL(0, prog.load_const_null(1));

  CHECKEQUAL(0, prog.interpret_exit_ok());
  CHECKEQUAL(0, prog.finalise());

  CHECKEQUAL(0, scan->setInterpretedCode(&prog));

  /* Api doesn't seem to wait for result of scan request,
   * so the failure surfaces on nextResult() below rather than here
   */
  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

  CHECKEQUAL(0, trans->getNdbError().code);

  CHECKEQUAL(-1, scan->nextResult());

  /* Scan-level resource error expected */
  CHECKEQUAL(217, scan->getNdbError().code);

  trans->close();

  /* Finished with error insert, cleanup the error insertion
   * (8068 frees the hoarded segments)
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8068));

  CHECKEQUAL(0, trans->execute(NdbTransaction::Rollback));

  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}
956
testDropSignalFragments(NDBT_Context * ctx,NDBT_Step * step)957 int testDropSignalFragments(NDBT_Context* ctx, NDBT_Step* step){
958 /* Segmented section exhaustion results in dropped signals
959 * Fragmented signals split one logical signal over multiple
960 * physical signals (to cope with the MAX_SIGNAL_LENGTH=32kB
961 * limitation).
962 * This testcase checks that when individual signals comprising
963 * a fragmented signal (in this case SCANTABREQ) are dropped, the
964 * system behaves correctly.
965 * Correct behaviour is to behave in the same way as if the signal
966 * was not fragmented, and for SCANTABREQ, to return a temporary
967 * resource error.
968 */
969 NdbRestarter restarter;
970 Ndb* pNdb= GETNDB(step);
971
972 /* SEND > ((2 * MAX_SEND_MESSAGE_BYTESIZE) + SOME EXTRA)
973 * This way we get at least 3 fragments
974 * However, as this is generally > 64kB, it's too much AttrInfo for
975 * a ScanTabReq, so the 'success' case returns error 874
976 */
977 const Uint32 PROG_WORDS= 16500;
978
979 struct SubCase
980 {
981 Uint32 errorInsertCode;
982 int expectedRc;
983 };
984 const Uint32 numSubCases= 5;
985 const SubCase cases[numSubCases]=
986 /* Error insert Scanrc */
987 {{ 0, 874}, // Normal, success which gives too much AI error
988 { 8074, 217}, // Drop first fragment -> error 217
989 { 8075, 217}, // Drop middle fragment(s) -> error 217
990 { 8076, 217}, // Drop last fragment -> error 217
991 { 8077, 217}}; // Drop all fragments -> error 217
992 const Uint32 numIterations= 50;
993
994 Uint32 buff[ PROG_WORDS + 10 ]; // 10 extra for final 'return' etc.
995
996 for (Uint32 iteration=0; iteration < (numIterations * numSubCases); iteration++)
997 {
998 /* Start a transaction */
999 NdbTransaction* trans= pNdb->startTransaction();
1000 CHECKNOTNULL(trans);
1001
1002 SubCase subcase= cases[iteration % numSubCases];
1003
1004 Uint32 errorInsertVal= subcase.errorInsertCode;
1005 // printf("Inserting error : %u\n", errorInsertVal);
1006 /* We insert the error twice, to bias races between
1007 * error-insert propagation and the succeeding scan
1008 * in favour of error insert winning!
1009 * This problem needs a more general fix
1010 */
1011 CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));
1012 CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));
1013
1014 NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());
1015
1016 CHECKNOTNULL(scan);
1017
1018 CHECKEQUAL(0, scan->readTuples());
1019
1020 /* Create a large program, to give a large SCANTABREQ */
1021 NdbInterpretedCode prog(ctx->getTab(), buff, PROG_WORDS + 10);
1022
1023 for (Uint32 w=0; w < PROG_WORDS; w++)
1024 CHECKEQUAL(0, prog.load_const_null(1));
1025
1026 CHECKEQUAL(0, prog.interpret_exit_ok());
1027 CHECKEQUAL(0, prog.finalise());
1028
1029 CHECKEQUAL(0, scan->setInterpretedCode(&prog));
1030
1031 /* Api doesn't seem to wait for result of scan request */
1032 CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
1033
1034 CHECKEQUAL(0, trans->getNdbError().code);
1035
1036 CHECKEQUAL(-1, scan->nextResult());
1037
1038 int expectedResult= subcase.expectedRc;
1039 CHECKEQUAL(expectedResult, scan->getNdbError().code);
1040
1041 scan->close();
1042
1043 trans->close();
1044 }
1045
1046 restarter.insertErrorInAllNodes(0);
1047
1048 return NDBT_OK;
1049 }
1050
1051
/* Testsuite registration : binds each testcase above to a suite entry.
 * Each TESTCASE runs its INITIALIZER function once against the tables
 * the NDBT framework supplies (the functions themselves filter on the
 * specific table names they need).
 */
NDBT_TESTSUITE(testLimits);

TESTCASE("ExhaustSegmentedSectionPk",
         "Test behaviour at Segmented Section exhaustion for PK"){
  INITIALIZER(testSegmentedSectionPk);
}

TESTCASE("ExhaustSegmentedSectionIX",
         "Test behaviour at Segmented Section exhaustion for Unique index"){
  INITIALIZER(testSegmentedSectionIx);
}
TESTCASE("ExhaustSegmentedSectionScan",
         "Test behaviour at Segmented Section exhaustion for Scan"){
  INITIALIZER(testSegmentedSectionScan);
}

TESTCASE("DropSignalFragments",
         "Test behaviour of Segmented Section exhaustion with fragmented signals"){
  INITIALIZER(testDropSignalFragments);
}

NDBT_TESTSUITE_END(testLimits);
1074
/* Entry point : initialise the NDB API, instantiate the suite and run
 * it with the command-line arguments (NDBT parses connect string,
 * testcase selection etc.).  Returns the suite's exit code.
 */
int main(int argc, const char** argv){
  ndb_init();
  NDBT_TESTSUITE_INSTANCE(testLimits);
  return testLimits.execute(argc, argv);
}
1080