1 /* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
22
23 Library for providing TAP support for testing C and C++ was written
24 by Mats Kindahl <mats@mysql.com>.
25 */
26
27 #include "tap.h"
28
29 #include "my_global.h"
30 #include "my_stacktrace.h"
31
32 #include <stdlib.h>
33 #include <stdarg.h>
34 #include <stdio.h>
35 #include <string.h>
36 #include <signal.h>
37
38 /*
39 Visual Studio 2003 does not know vsnprintf but knows _vsnprintf.
40 We don't put this #define elsewhere because we prefer my_vsnprintf
41 everywhere instead, except when linking with libmysys is not
42 desirable - the case here.
43 */
44 #if defined(_MSC_VER) && ( _MSC_VER == 1310 )
45 #define vsnprintf _vsnprintf
46 #endif
47
48 /**
49 @defgroup MyTAP_Internal MyTAP Internals
50
51 Internal functions and data structures for the MyTAP implementation.
52 */
53
/**
  Test data structure.

  Data structure containing all information about the test suite.
  As used in this file, the initializer fields are: the declared
  plan (NO_PLAN until plan() is called), the number of the last
  emitted test point, the number of failed test points, and the
  current "todo" reason (empty string when not in a todo section).

  @ingroup MyTAP_Internal
*/
static TEST_DATA g_test = { NO_PLAN, 0, 0, "" };
62
63 /**
64 Output stream for test report message.
65
66 The macro is just a temporary solution.
67
68 @ingroup MyTAP_Internal
69 */
70 #define tapout stdout
71
72 /**
73 Emit the beginning of a test line, that is: "(not) ok", test number,
74 and description.
75
76 To emit the directive, use the emit_dir() function
77
78 @ingroup MyTAP_Internal
79
80 @see emit_dir
81
82 @param pass 'true' if test passed, 'false' otherwise
83 @param fmt Description of test in printf() format.
84 @param ap Vararg list for the description string above.
85 */
86 static void
vemit_tap(int pass,char const * fmt,va_list ap)87 vemit_tap(int pass, char const *fmt, va_list ap)
88 {
89 fprintf(tapout, "%sok %d%s",
90 pass ? "" : "not ",
91 ++g_test.last,
92 (fmt && *fmt) ? " - " : "");
93 if (fmt && *fmt)
94 vfprintf(tapout, fmt, ap);
95 fflush(tapout);
96 }
97
98
99 /**
100 Emit a TAP directive.
101
102 TAP directives are comments after that have the form:
103
104 @code
105 ok 1 # skip reason for skipping
106 not ok 2 # todo some text explaining what remains
107 @endcode
108
109 @ingroup MyTAP_Internal
110
111 @param dir Directive as a string
112 @param why Explanation string
113 */
114 static void
emit_dir(const char * dir,const char * why)115 emit_dir(const char *dir, const char *why)
116 {
117 fprintf(tapout, " # %s %s", dir, why);
118 fflush(tapout);
119 }
120
121
122 /**
123 Emit a newline to the TAP output stream.
124
125 @ingroup MyTAP_Internal
126 */
127 static void
emit_endl()128 emit_endl()
129 {
130 fprintf(tapout, "\n");
131 fflush(tapout);
132 }
133
/**
  Signal handler installed by plan() for fatal ("core") signals.

  Prints a stack trace when stacktrace support is compiled in, then
  restores the default disposition and re-raises the signal so the
  process terminates with the original signal status (allowing a core
  dump where the OS is configured for one).

  @ingroup MyTAP_Internal

  @param signo Number of the signal that was delivered.
*/
static void
handle_core_signal(int signo)
{
  /* BAIL_OUT("Signal %d thrown", signo); */
#ifdef HAVE_STACKTRACE
  fprintf(stderr, "Signal %d thrown, attempting backtrace.\n", signo);
  my_print_stacktrace(NULL, 0);
#endif
  /* Re-raise with the default handler so the default action runs. */
  signal(signo, SIG_DFL);
  raise(signo);
  _exit(EXIT_FAILURE);          /* Only reached if raise() has no effect. */
}
146
147 void
BAIL_OUT(char const * fmt,...)148 BAIL_OUT(char const *fmt, ...)
149 {
150 va_list ap;
151 va_start(ap, fmt);
152 fprintf(tapout, "Bail out! ");
153 vfprintf(tapout, fmt, ap);
154 emit_endl();
155 va_end(ap);
156 exit(255);
157 }
158
159
160 void
diag(char const * fmt,...)161 diag(char const *fmt, ...)
162 {
163 va_list ap;
164 va_start(ap, fmt);
165 fprintf(tapout, "# ");
166 vfprintf(tapout, fmt, ap);
167 emit_endl();
168 va_end(ap);
169 }
170
171 typedef struct signal_entry {
172 int signo;
173 void (*handler)(int);
174 } signal_entry;
175
176 static signal_entry install_signal[]= {
177 { SIGQUIT, handle_core_signal },
178 { SIGILL, handle_core_signal },
179 { SIGABRT, handle_core_signal },
180 { SIGFPE, handle_core_signal },
181 { SIGSEGV, handle_core_signal }
182 #ifdef SIGBUS
183 , { SIGBUS, handle_core_signal }
184 #endif
185 #ifdef SIGXCPU
186 , { SIGXCPU, handle_core_signal }
187 #endif
188 #ifdef SIGXCPU
189 , { SIGXFSZ, handle_core_signal }
190 #endif
191 #ifdef SIGXCPU
192 , { SIGSYS, handle_core_signal }
193 #endif
194 #ifdef SIGXCPU
195 , { SIGTRAP, handle_core_signal }
196 #endif
197 };
198
/**
  Flag telling tests whether "big" (expensive) tests should be skipped.

  Non-zero by default; plan() sets it to the result of
  strcmp(MYTAP_CONFIG, "big"), i.e. zero exactly when the MYTAP_CONFIG
  environment variable is set to "big".
*/
int skip_big_tests= 1;
200
201 void
plan(int const count)202 plan(int const count)
203 {
204 char *config= getenv("MYTAP_CONFIG");
205 size_t i;
206
207 if (config)
208 skip_big_tests= strcmp(config, "big");
209
210 /*
211 Install signal handler
212 */
213
214 for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i)
215 signal(install_signal[i].signo, install_signal[i].handler);
216
217 g_test.plan= count;
218 switch (count)
219 {
220 case NO_PLAN:
221 break;
222 default:
223 if (count > 0)
224 {
225 fprintf(tapout, "1..%d\n", count);
226 fflush(tapout);
227 }
228 break;
229 }
230 }
231
232
233 void
skip_all(char const * reason,...)234 skip_all(char const *reason, ...)
235 {
236 va_list ap;
237 va_start(ap, reason);
238 fprintf(tapout, "1..0 # skip ");
239 vfprintf(tapout, reason, ap);
240 fflush(tapout);
241 va_end(ap);
242 exit(0);
243 }
244
245 void
ok(int const pass,char const * fmt,...)246 ok(int const pass, char const *fmt, ...)
247 {
248 va_list ap;
249 va_start(ap, fmt);
250
251 if (!pass && *g_test.todo == '\0')
252 ++g_test.failed;
253
254 vemit_tap(pass, fmt, ap);
255 va_end(ap);
256 if (*g_test.todo != '\0')
257 emit_dir("todo", g_test.todo);
258 emit_endl();
259 }
260
/**
  Report the result of one test point without a description.

  Like ok(), but with no description text; failures inside a todo
  section are not counted.

  @param pass Non-zero if the test point passed.
*/
void
ok1(int const pass)
{
  va_list ap;

  /* The va_list is never consumed: vemit_tap() ignores it when the
     format string is NULL.  Zeroing it merely keeps the compiler from
     warning about an uninitialized variable. */
  memset(&ap, 0, sizeof(ap));

  if (!pass && *g_test.todo == '\0')
    ++g_test.failed;

  vemit_tap(pass, NULL, ap);

  if (*g_test.todo != '\0')
    emit_dir("todo", g_test.todo);

  emit_endl();
}
278
/**
  Skip the next @a how_many test points, emitting an
  "ok ... # skip <reason>" line for each so the plan still adds up.

  @param how_many Number of test points to mark as skipped.
  @param fmt      printf()-style reason for skipping; may be NULL or
                  empty for no reason.  The formatted reason is
                  truncated to fit a 80-byte buffer.
*/
void
skip(int how_many, char const *fmt, ...)
{
  char reason[80];
  if (fmt && *fmt)
  {
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(reason, sizeof(reason), fmt, ap);
    va_end(ap);
  }
  else
    reason[0] = '\0';

  while (how_many-- > 0)
  {
    va_list ap;
    /* Dummy va_list: vemit_tap() never touches it since fmt is NULL. */
    memset((char*) &ap, 0, sizeof(ap)); /* Keep compiler happy */
    vemit_tap(1, NULL, ap);
    emit_dir("skip", reason);
    emit_endl();
  }
}
302
/**
  Start a todo section: until todo_end() is called, every test point
  is marked with a "# todo <message>" directive and failures are not
  counted against the suite.

  @param message printf()-style text explaining what remains to be
                 done.  The formatted message is truncated to the size
                 of the todo buffer in TEST_DATA.
*/
void
todo_start(char const *message, ...)
{
  va_list ap;
  va_start(ap, message);
  vsnprintf(g_test.todo, sizeof(g_test.todo), message, ap);
  va_end(ap);
}
311
312 void
todo_end()313 todo_end()
314 {
315 *g_test.todo = '\0';
316 }
317
exit_status()318 int exit_status() {
319 /*
320 If there were no plan, we write one last instead.
321 */
322 if (g_test.plan == NO_PLAN)
323 plan(g_test.last);
324
325 if (g_test.plan != g_test.last)
326 {
327 diag("%d tests planned but%s %d executed",
328 g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last);
329 return EXIT_FAILURE;
330 }
331
332 if (g_test.failed > 0)
333 {
334 diag("Failed %d tests!", g_test.failed);
335 return EXIT_FAILURE;
336 }
337
338 return EXIT_SUCCESS;
339 }
340
341 /**
342 @mainpage Testing C and C++ using MyTAP
343
344 @section IntroSec Introduction
345
346 Unit tests are used to test individual components of a system. In
347 contrast, functional tests usually test the entire system. The
348 rationale is that each component should be correct if the system is
349 to be correct. Unit tests are usually small pieces of code that
350 tests an individual function, class, a module, or other unit of the
351 code.
352
353 Observe that a correctly functioning system can be built from
354 "faulty" components. The problem with this approach is that as the
355 system evolves, the bugs surface in unexpected ways, making
356 maintenance harder.
357
358 The advantages of using unit tests to test components of the system
359 are several:
360
361 - The unit tests can make a more thorough testing than the
362 functional tests by testing correctness even for pathological use
363 (which shouldn't be present in the system). This increases the
364 overall robustness of the system and makes maintenance easier.
365
366 - It is easier and faster to find problems with a malfunctioning
367 component than to find problems in a malfunctioning system. This
368 shortens the compile-run-edit cycle and therefore improves the
369 overall performance of development.
370
371 - The component has to support at least two uses: in the system and
372 in a unit test. This leads to more generic and stable interfaces
373 and in addition promotes the development of reusable components.
374
  For example, the following are typical functional tests:
  - Do transactions work according to specifications?
  - Can we connect a client to the server and execute statements?
378
379 In contrast, the following are typical unit tests:
380
  - Can the 'String' class handle a specified list of character sets?
  - Do all operations for 'my_bitmap' produce the correct result?
  - Do all the NIST test vectors for the AES implementation encrypt
    correctly?
385
386
387 @section UnitTest Writing unit tests
388
389 The purpose of writing unit tests is to use them to drive component
390 development towards a solution that passes the tests. This means that the
391 unit tests has to be as complete as possible, testing at least:
392
393 - Normal input
394 - Borderline cases
395 - Faulty input
396 - Error handling
397 - Bad environment
398
399 @subsection NormalSubSec Normal input
400
  This is to test that the component has the expected behaviour.
402 This is just plain simple: test that it works. For example, test
403 that you can unpack what you packed, adding gives the sum, pincing
404 the duck makes it quack.
405
406 This is what everybody does when they write tests.
407
408
409 @subsection BorderlineTests Borderline cases
410
411 If you have a size anywhere for your component, does it work for
412 size 1? Size 0? Sizes close to <code>UINT_MAX</code>?
413
414 It might not be sensible to have a size 0, so in this case it is
415 not a borderline case, but rather a faulty input (see @ref
416 FaultyInputTests).
417
418
419 @subsection FaultyInputTests Faulty input
420
  Does your bitmap handle 0 bits size? Well, it might not be designed
  for it, but it should <em>not</em> crash the application, but
  rather produce an error. This is called defensive programming.
424
425 Unfortunately, adding checks for values that should just not be
426 entered at all is not always practical: the checks cost cycles and
427 might cost more than it's worth. For example, some functions are
428 designed so that you may not give it a null pointer. In those
429 cases it's not sensible to pass it <code>NULL</code> just to see it
430 crash.
431
  Since every experienced programmer adds an <code>assert()</code> to
  ensure that you get a proper failure for the debug builds when a
  null pointer is passed (you add asserts too, right?), you will in
  this case instead have a controlled (early) crash in the debug build.
436
437
438 @subsection ErrorHandlingTests Error handling
439
440 This is testing that the errors your component is designed to give
441 actually are produced. For example, testing that trying to open a
442 non-existing file produces a sensible error code.
443
444
445 @subsection BadEnvironmentTests Environment
446
  Sometimes, modules have to behave well even when the environment
448 fails to work correctly. Typical examples are when the computer is
449 out of dynamic memory or when the disk is full. You can emulate
450 this by replacing, e.g., <code>malloc()</code> with your own
451 version that will work for a while, but then fail. Some things are
452 worth to keep in mind here:
453
454 - Make sure to make the function fail deterministically, so that
455 you really can repeat the test.
456
457 - Make sure that it doesn't just fail immediately. The unit might
458 have checks for the first case, but might actually fail some time
459 in the near future.
460
461
462 @section UnitTest How to structure a unit test
463
464 In this section we will give some advice on how to structure the
465 unit tests to make the development run smoothly. The basic
466 structure of a test is:
467
468 - Plan
469 - Test
470 - Report
471
472
473 @subsection TestPlanning Plan the test
474
475 Planning the test means telling how many tests there are. In the
476 event that one of the tests causes a crash, it is then possible to
477 see that there are fewer tests than expected, and print a proper
478 error message.
479
480 To plan a test, use the @c plan() function in the following manner:
481
482 @code
483 int main(int argc, char *argv[])
484 {
485 plan(5);
486 .
487 .
488 .
489 }
490 @endcode
491
492 If you don't call the @c plan() function, the number of tests
493 executed will be printed at the end. This is intended to be used
  while developing the unit and you are constantly adding tests. It
  is not intended to be used after the unit has been released.
496
497
498 @subsection TestRunning Execute the test
499
500 To report the status of a test, the @c ok() function is used in the
501 following manner:
502
503 @code
504 int main(int argc, char *argv[])
505 {
506 plan(5);
507 ok(ducks == paddling_ducks,
508 "%d ducks did not paddle", ducks - paddling_ducks);
509 .
510 .
511 .
512 }
513 @endcode
514
515 This will print a test result line on the standard output in TAP
516 format, which allows TAP handling frameworks (like Test::Harness)
517 to parse the status of the test.
518
519 @subsection TestReport Report the result of the test
520
521 At the end, a complete test report should be written, with some
  statistics. If the test returns EXIT_SUCCESS, all tests were
  successful, otherwise at least one test failed.
524
  To get a TAP compliant output and exit status, report the exit
526 status in the following manner:
527
528 @code
529 int main(int argc, char *argv[])
530 {
531 plan(5);
532 ok(ducks == paddling_ducks,
533 "%d ducks did not paddle", ducks - paddling_ducks);
534 .
535 .
536 .
537 return exit_status();
538 }
539 @endcode
540
541 @section DontDoThis Ways to not do unit testing
542
543 In this section, we'll go through some quite common ways to write
544 tests that are <em>not</em> a good idea.
545
546 @subsection BreadthFirstTests Doing breadth-first testing
547
548 If you're writing a library with several functions, don't test all
549 functions using size 1, then all functions using size 2, etc. If a
550 test for size 42 fails, you have no easy way of tracking down why
551 it failed.
552
553 It is better to concentrate on getting one function to work at a
554 time, which means that you test each function for all sizes that
555 you think is reasonable. Then you continue with the next function,
556 doing the same. This is usually also the way that a library is
557 developed (one function at a time) so stick to testing that is
558 appropriate for now the unit is developed.
559
560 @subsection JustToBeSafeTest Writing unnecessarily large tests
561
562 Don't write tests that use parameters in the range 1-1024 unless
  you have a very good reason to believe that the component will
564 succeed for 562 but fail for 564 (the numbers picked are just
565 examples).
566
567 It is very common to write extensive tests "just to be safe."
568 Having a test suite with a lot of values might give you a warm
569 fuzzy feeling, but it doesn't really help you find the bugs. Good
570 tests fail; seriously, if you write a test that you expect to
571 succeed, you don't need to write it. If you think that it
572 <em>might</em> fail, <em>then</em> you should write it.
573
574 Don't take this as an excuse to avoid writing any tests at all
575 "since I make no mistakes" (when it comes to this, there are two
576 kinds of people: those who admit they make mistakes, and those who
577 don't); rather, this means that there is no reason to test that
578 using a buffer with size 100 works when you have a test for buffer
579 size 96.
580
581 The drawback is that the test suite takes longer to run, for little
582 or no benefit. It is acceptable to do a exhaustive test if it
583 doesn't take too long to run and it is quite common to do an
584 exhaustive test of a function for a small set of values.
  Use your judgment to decide what is excessive: your mileage may
586 vary.
587 */
588
589 /**
590 @example simple.t.c
591
  This is a simple example of how to write a test using the
593 library. The output of this program is:
594
595 @code
596 1..1
597 # Testing basic functions
598 ok 1 - Testing gcs()
599 @endcode
600
601 The basic structure is: plan the number of test points using the
602 plan() function, perform the test and write out the result of each
603 test point using the ok() function, print out a diagnostics message
604 using diag(), and report the result of the test by calling the
605 exit_status() function. Observe that this test does excessive
606 testing (see @ref JustToBeSafeTest), but the test point doesn't
607 take very long time.
608 */
609
610 /**
611 @example todo.t.c
612
613 This example demonstrates how to use the <code>todo_start()</code>
614 and <code>todo_end()</code> function to mark a sequence of tests to
615 be done. Observe that the tests are assumed to fail: if any test
616 succeeds, it is considered a "bonus".
617 */
618
619 /**
620 @example skip.t.c
621
622 This is an example of how the <code>SKIP_BLOCK_IF</code> can be
623 used to skip a predetermined number of tests. Observe that the
624 macro actually skips the following statement, but it's not sensible
625 to use anything than a block.
626 */
627
628 /**
629 @example skip_all.t.c
630
631 Sometimes, you skip an entire test because it's testing a feature
632 that doesn't exist on the system that you're testing. To skip an
633 entire test, use the <code>skip_all()</code> function according to
634 this example.
635 */
636