/* Copyright (c) 2006, 2021, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation.  The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

   Library for providing TAP support for testing C and C++ was written
   by Mats Kindahl <mats@mysql.com>.
*/

#include "tap.h"

#include "my_global.h"
#include "my_stacktrace.h"

#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/*
  Visual Studio 2003 does not know vsnprintf but knows _vsnprintf.
  We don't put this #define elsewhere because we prefer my_vsnprintf
  everywhere instead, except where linking with libmysys is not
  desirable, as is the case here.
*/
#if defined(_MSC_VER) && ( _MSC_VER == 1310 )
#define vsnprintf _vsnprintf
#endif

/**
   @defgroup MyTAP_Internal MyTAP Internals

   Internal functions and data structures for the MyTAP implementation.
*/

/**
   Test data structure.

   Data structure containing all information about the test suite.

   @ingroup MyTAP_Internal
 */
static TEST_DATA g_test = { NO_PLAN, 0, 0, "" };

/**
   Output stream for test report messages.

   The macro is just a temporary solution.

   @ingroup MyTAP_Internal
 */
#define tapout stdout

/**
  Emit the beginning of a test line, that is: "(not) ok", the test
  number, and the description.

  To emit the directive, use the emit_dir() function.

  @ingroup MyTAP_Internal

  @see emit_dir

  @param pass  'true' if the test passed, 'false' otherwise
  @param fmt   Description of the test in printf() format.
  @param ap    Vararg list for the description string above.
 */
static void
vemit_tap(int pass, char const *fmt, va_list ap)
{
  fprintf(tapout, "%sok %d%s",
          pass ? "" : "not ",
          ++g_test.last,
          (fmt && *fmt) ? " - " : "");
  if (fmt && *fmt)
    vfprintf(tapout, fmt, ap);
  fflush(tapout);
}


/**
   Emit a TAP directive.

   TAP directives are comments appended after the test line; they have
   the form:

   @code
   ok 1 # skip reason for skipping
   not ok 2 # todo some text explaining what remains
   @endcode

   @ingroup MyTAP_Internal

   @param dir  Directive as a string
   @param why  Explanation string
 */
static void
emit_dir(const char *dir, const char *why)
{
  fprintf(tapout, " # %s %s", dir, why);
  fflush(tapout);
}


/**
   Emit a newline to the TAP output stream.

   @ingroup MyTAP_Internal
 */
static void
emit_endl()
{
  fprintf(tapout, "\n");
  fflush(tapout);
}

static void
handle_core_signal(int signo)
{
  /* BAIL_OUT("Signal %d thrown", signo); */
#ifdef HAVE_STACKTRACE
  fprintf(stderr, "Signal %d thrown, attempting backtrace.\n", signo);
  my_print_stacktrace(NULL, 0);
#endif
  signal(signo, SIG_DFL);
  raise(signo);
  _exit(EXIT_FAILURE);
}

void
BAIL_OUT(char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  fprintf(tapout, "Bail out! ");
  vfprintf(tapout, fmt, ap);
  emit_endl();
  va_end(ap);
  exit(255);
}


void
diag(char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  fprintf(tapout, "# ");
  vfprintf(tapout, fmt, ap);
  emit_endl();
  va_end(ap);
}

typedef struct signal_entry {
  int signo;
  void (*handler)(int);
} signal_entry;

static signal_entry install_signal[]= {
#ifdef _WIN32
  { SIGTERM, handle_core_signal },
#else
  { SIGQUIT, handle_core_signal },
#endif
  { SIGILL,  handle_core_signal },
  { SIGABRT, handle_core_signal },
  { SIGFPE,  handle_core_signal },
  { SIGSEGV, handle_core_signal }
#ifdef SIGBUS
  , { SIGBUS,  handle_core_signal }
#endif
#ifdef SIGXCPU
  , { SIGXCPU, handle_core_signal }
#endif
#ifdef SIGXFSZ
  , { SIGXFSZ, handle_core_signal }
#endif
#ifdef SIGSYS
  , { SIGSYS,  handle_core_signal }
#endif
#ifdef SIGTRAP
  , { SIGTRAP, handle_core_signal }
#endif
};

int skip_big_tests= 1;

void
plan(int const count)
{
  char *config= getenv("MYTAP_CONFIG");
  size_t i;

  if (config)
    skip_big_tests= strcmp(config, "big");

  /*
    Install the signal handler for every signal in install_signal[].
  */

  for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i)
    signal(install_signal[i].signo, install_signal[i].handler);

  g_test.plan= count;
  switch (count)
  {
  case NO_PLAN:
    break;
  default:
    if (count > 0)
    {
      fprintf(tapout, "1..%d\n", count);
      fflush(tapout);
    }
    break;
  }
}


void
skip_all(char const *reason, ...)
{
  va_list ap;
  va_start(ap, reason);
  fprintf(tapout, "1..0 # skip ");
  vfprintf(tapout, reason, ap);
  fflush(tapout);
  va_end(ap);
  exit(0);
}

void
ok(int const pass, char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);

  if (!pass && *g_test.todo == '\0')
    ++g_test.failed;

  vemit_tap(pass, fmt, ap);
  va_end(ap);
  if (*g_test.todo != '\0')
    emit_dir("todo", g_test.todo);
  emit_endl();
}

void
ok1(int const pass)
{
  va_list ap;

  memset(&ap, 0, sizeof(ap));

  if (!pass && *g_test.todo == '\0')
    ++g_test.failed;

  vemit_tap(pass, NULL, ap);

  if (*g_test.todo != '\0')
    emit_dir("todo", g_test.todo);

  emit_endl();
}

void
skip(int how_many, char const *fmt, ...)
{
  char reason[80];
  if (fmt && *fmt)
  {
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(reason, sizeof(reason), fmt, ap);
    va_end(ap);
  }
  else
    reason[0] = '\0';

  while (how_many-- > 0)
  {
    va_list ap;
    memset((char*) &ap, 0, sizeof(ap));         /* Keep compiler happy */
    vemit_tap(1, NULL, ap);
    emit_dir("skip", reason);
    emit_endl();
  }
}

void
todo_start(char const *message, ...)
{
  va_list ap;
  va_start(ap, message);
  vsnprintf(g_test.todo, sizeof(g_test.todo), message, ap);
  va_end(ap);
}

void
todo_end()
{
  *g_test.todo = '\0';
}

int exit_status() {
  /*
    If there was no plan, write one now, based on the number of tests
    actually run.
  */
  if (g_test.plan == NO_PLAN)
    plan(g_test.last);

  if (g_test.plan != g_test.last)
  {
    diag("%d tests planned but%s %d executed",
         g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last);
    return EXIT_FAILURE;
  }

  if (g_test.failed > 0)
  {
    diag("Failed %d tests!", g_test.failed);
    return EXIT_FAILURE;
  }

  return EXIT_SUCCESS;
}

/**
   @mainpage Testing C and C++ using MyTAP

   @section IntroSec Introduction

   Unit tests are used to test individual components of a system. In
   contrast, functional tests usually test the entire system.  The
   rationale is that each component should be correct if the system is
   to be correct.  Unit tests are usually small pieces of code that
   test an individual function, class, module, or other unit of the
   code.

   Observe that a correctly functioning system can be built from
   "faulty" components.  The problem with this approach is that as the
   system evolves, the bugs surface in unexpected ways, making
   maintenance harder.

   Using unit tests to test components of the system has several
   advantages:

   - Unit tests can test more thoroughly than functional tests by
     checking correctness even for pathological uses (which shouldn't
     be present in the system).  This increases the overall robustness
     of the system and makes maintenance easier.

   - It is easier and faster to find problems in a malfunctioning
     component than in a malfunctioning system.  This shortens the
     compile-run-edit cycle and therefore improves the overall
     performance of development.

   - The component has to support at least two uses: in the system and
     in a unit test.  This leads to more generic and stable interfaces
     and in addition promotes the development of reusable components.

   For example, the following are typical functional tests:
   - Do transactions work according to specifications?
   - Can we connect a client to the server and execute statements?

   In contrast, the following are typical unit tests:

   - Can the 'String' class handle a specified list of character sets?
   - Do all operations for 'my_bitmap' produce the correct result?
   - Do all the NIST test vectors for the AES implementation encrypt
     correctly?


   @section UnitTest Writing unit tests

   The purpose of writing unit tests is to use them to drive component
   development towards a solution that passes the tests.  This means that the
   unit tests have to be as complete as possible, testing at least:

   - Normal input
   - Borderline cases
   - Faulty input
   - Error handling
   - Bad environment

   @subsection NormalSubSec Normal input

   This is to test that the component has the expected behaviour.
   This is just plain simple: test that it works.  For example, test
   that you can unpack what you packed, that adding gives the sum, and
   that pinching the duck makes it quack.

   This is what everybody does when they write tests.


   @subsection BorderlineTests Borderline cases

   If you have a size anywhere in your component, does it work for
   size 1? Size 0? Sizes close to <code>UINT_MAX</code>?

   It might not be sensible to have a size 0, so in this case it is
   not a borderline case, but rather a faulty input (see @ref
   FaultyInputTests).
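
   As an illustration, here is a minimal sketch of borderline testing.
   The component under test, my_pack(), is a hypothetical function made
   up for this sketch; only plan(), ok(), and exit_status() are MyTAP:

   @code
   #include "tap.h"
   #include <string.h>

   /* Hypothetical component under test: copies len bytes of src into
      dst if they fit, returning the number of bytes copied, or -1. */
   static int my_pack(char *dst, size_t cap, const char *src, size_t len)
   {
     if (len > cap)
       return -1;
     memcpy(dst, src, len);
     return (int)len;
   }

   int main(int argc, char *argv[])
   {
     char buf[4];
     plan(3);
     ok(my_pack(buf, sizeof(buf), "", 0) == 0,     "size 0");
     ok(my_pack(buf, sizeof(buf), "x", 1) == 1,    "size 1");
     ok(my_pack(buf, sizeof(buf), "xxxxx", 5) < 0, "size just past capacity");
     return exit_status();
   }
   @endcode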


   @subsection FaultyInputTests Faulty input

   Does your bitmap handle a bit size of 0? Well, it might not be
   designed for it, but it should <em>not</em> crash the application;
   it should produce an error instead.  This is called defensive
   programming.

   Unfortunately, adding checks for values that should never be passed
   at all is not always practical: the checks cost cycles and might
   cost more than they're worth.  For example, some functions are
   designed so that you may not give them a null pointer.  In those
   cases it's not sensible to pass them <code>NULL</code> just to see
   them crash.

   Since every experienced programmer adds an <code>assert()</code> to
   ensure that you get a proper failure in the debug builds when a
   null pointer is passed (you add asserts too, right?), you will in
   this case instead have a controlled (early) crash in the debug
   build.
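
   A minimal sketch of this pattern (the bitmap setter below is an
   assumption made up for illustration, not MySQL's my_bitmap API):

   @code
   #include <assert.h>
   #include <stddef.h>

   /* Hypothetical defensive function: asserts in debug builds and
      returns an error code, rather than crashing, in release builds. */
   static int bitmap_set_bit(unsigned char *map, size_t nbits, size_t bit)
   {
     assert(map != NULL);           /* Controlled early crash in debug */
     if (map == NULL || bit >= nbits)
       return -1;                   /* Defensive error in release */
     map[bit / 8] |= (unsigned char)(1u << (bit % 8));
     return 0;
   }
   @endcode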


   @subsection ErrorHandlingTests Error handling

   This is testing that the errors your component is designed to
   report are actually produced.  For example, test that trying to
   open a non-existing file produces a sensible error code.
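
   A sketch of such a test using only standard C I/O (the path is
   assumed not to exist on the test machine):

   @code
   #include "tap.h"
   #include <stdio.h>

   int main(int argc, char *argv[])
   {
     FILE *f= fopen("/no/such/file", "r");
     plan(1);
     ok(f == NULL, "opening a non-existing file fails");
     if (f != NULL)
       fclose(f);
     return exit_status();
   }
   @endcode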


   @subsection BadEnvironmentTests Environment

   Sometimes, modules have to behave well even when the environment
   fails to work correctly.  Typical examples are when the computer is
   out of dynamic memory or when the disk is full.  You can emulate
   this by replacing, e.g., <code>malloc()</code> with your own
   version that will work for a while, but then fail; a sketch follows
   below.  Some things are worth keeping in mind here:

   - Make sure the function fails deterministically, so that you
     really can repeat the test.

   - Make sure that it doesn't just fail immediately.  The unit might
     have checks for the first case, but might actually fail some time
     in the near future.
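
   A minimal sketch of such a replacement allocator (xmalloc() and the
   countdown scheme are assumptions for illustration):

   @code
   #include <stdlib.h>

   /* Fail deterministically: succeed for the first few calls, then
      return NULL as if memory were exhausted.  Reset the counter
      between tests to keep each run repeatable. */
   static int xmalloc_calls_left= 10;

   static void *xmalloc(size_t size)
   {
     if (xmalloc_calls_left-- <= 0)
       return NULL;                 /* Simulated out-of-memory */
     return malloc(size);
   }
   @endcode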


   @section UnitTestStructure How to structure a unit test

   In this section we will give some advice on how to structure unit
   tests to make the development run smoothly.  The basic structure
   of a test is:

   - Plan
   - Test
   - Report


   @subsection TestPlanning Plan the test

   Planning the test means declaring how many tests there are.  In the
   event that one of the tests causes a crash, it is then possible to
   see that there are fewer tests than expected and print a proper
   error message.

   To plan a test, use the @c plan() function in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
         .
         .
         .
   }
   @endcode

   If you don't call the @c plan() function, the number of tests
   executed will be printed at the end.  This is intended to be used
   while developing the unit, when you are constantly adding tests.  It
   is not intended to be used after the unit has been released.


   @subsection TestRunning Execute the test

   To report the status of a test, the @c ok() function is used in the
   following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
             .
             .
             .
   }
   @endcode

   This will print a test result line on the standard output in TAP
   format, which allows TAP handling frameworks (like Test::Harness)
   to parse the status of the test.

   @subsection TestReport  Report the result of the test

   At the end, a complete test report should be written, with some
   statistics. If the test returns EXIT_SUCCESS, all tests were
   successful; otherwise, at least one test failed.

   To get a TAP compliant output and exit status, report the exit
   status in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
             .
             .
             .
     return exit_status();
   }
   @endcode

   @section DontDoThis Ways to not do unit testing

   In this section, we'll go through some quite common ways to write
   tests that are <em>not</em> a good idea.

   @subsection BreadthFirstTests Doing breadth-first testing

   If you're writing a library with several functions, don't test all
   functions using size 1, then all functions using size 2, etc.  If a
   test for size 42 fails, you have no easy way of tracking down why
   it failed.

   It is better to concentrate on getting one function to work at a
   time, which means that you test each function for all sizes that
   you think are reasonable.  Then you continue with the next function,
   doing the same. This is usually also the way that a library is
   developed (one function at a time), so stick to testing that
   matches how the unit is developed.

   @subsection JustToBeSafeTest Writing unnecessarily large tests

   Don't write tests that use parameters in the range 1-1024 unless
   you have a very good reason to believe that the component will
   succeed for 562 but fail for 564 (the numbers picked are just
   examples).

   It is very common to write extensive tests "just to be safe."
   Having a test suite with a lot of values might give you a warm
   fuzzy feeling, but it doesn't really help you find the bugs.  Good
   tests fail; seriously, if you write a test that you expect to
   succeed, you don't need to write it.  If you think that it
   <em>might</em> fail, <em>then</em> you should write it.

   Don't take this as an excuse to avoid writing any tests at all
   "since I make no mistakes" (when it comes to this, there are two
   kinds of people: those who admit they make mistakes, and those who
   don't); rather, this means that there is no reason to test that
   using a buffer with size 100 works when you have a test for buffer
   size 96.

   The drawback is that the test suite takes longer to run, for little
   or no benefit.  It is acceptable to do an exhaustive test if it
   doesn't take too long to run, and it is quite common to do an
   exhaustive test of a function for a small set of values.
   Use your judgment to decide what is excessive: your mileage may
   vary.
*/

/**
   @example simple.t.c

   This is a simple example of how to write a test using the
   library.  The output of this program is:

   @code
   1..1
   # Testing basic functions
   ok 1 - Testing gcs()
   @endcode

   The basic structure is: plan the number of test points using the
   plan() function, perform the tests and write out the result of each
   test point using the ok() function, print out a diagnostics message
   using diag(), and report the result of the test by calling the
   exit_status() function.  Observe that this test does excessive
   testing (see @ref JustToBeSafeTest), but the test points don't
   take very long to run.
*/

/**
   @example todo.t.c

   This example demonstrates how to use the <code>todo_start()</code>
   and <code>todo_end()</code> functions to mark a sequence of tests
   still to be done.  Observe that the tests are assumed to fail: if
   any test succeeds, it is considered a "bonus".
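
   A sketch of the pattern (the test points shown are placeholders,
   not the actual contents of todo.t.c):

   @code
   plan(4);
   ok(1, "works");
   todo_start("fix the frobnicator");
   ok(0, "known broken");    /* Reported with "# todo", not a failure */
   ok(0, "this one too");
   todo_end();
   ok(1, "works again");
   @endcode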
*/

/**
   @example skip.t.c

   This is an example of how the <code>SKIP_BLOCK_IF</code> macro can
   be used to skip a predetermined number of tests. Observe that the
   macro actually skips the following statement, but it's not sensible
   to use anything other than a block.
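
   A sketch of the pattern, assuming the SKIP_BLOCK_IF(cond, count,
   reason) form from tap.h (the condition is a placeholder):

   @code
   int have_feature= 0;      /* Placeholder condition */
   plan(3);
   ok(1, "prerequisite");
   SKIP_BLOCK_IF(!have_feature, 2, "feature not available")
   {
     ok(1, "feature test 1");
     ok(1, "feature test 2");
   }
   return exit_status();
   @endcode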
*/

/**
   @example skip_all.t.c

   Sometimes, you skip an entire test because it's testing a feature
   that doesn't exist on the system that you're testing. To skip an
   entire test, use the <code>skip_all()</code> function according to
   this example.
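
   A sketch of the pattern (the feature probe is a placeholder; note
   that skip_all() is called instead of plan() and exits the program):

   @code
   #include "tap.h"

   int main(int argc, char *argv[])
   {
     int have_feature= 0;    /* Hypothetical feature probe */

     if (!have_feature)
       skip_all("feature not available on this system");

     plan(1);
     ok(1, "feature works");
     return exit_status();
   }
   @endcode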
 */