/*
 *========================================================================
 * $Id: help.c 529 2011-04-01 17:49:31Z rgbatduke $
 *
 * See copyright in copyright.h and the accompanying file COPYING
 *========================================================================
 */

#include "dieharder.h"
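
/*
 * help(): print a test description (or all of them) and exit.  When -a
 * is set (all==YES) the description of every registered entry in
 * dh_test_types[] is dumped; otherwise only the test selected by -d
 * (dtest_num) is printed, falling back to Usage() if no such test is
 * registered.
 */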
void help()
{

 int i;

 dieharder_test_types();

 /*
  * Help for EACH test must be defined in that test's source file, even if
  * it is null or says "sorry, no help available".  The help should be
  * a brief test description, all pretty, with each line beginning with
  * "#" so it is easy to filter out of standard test results output
  * (see the illustrative sketch following this function).
  */
 if(all==YES){
   for(i=0;i<900;i++){
     if(dh_test_types[i]){
       fprintf(stdout,"%s",dh_test_types[i]->description);
     }
   }
   Exit(0);
 }

 /* printf("OK, what is dtest_num = %u\n",dtest_num);*/

 if(dh_test_types[dtest_num]){
   fprintf(stdout,"%s",dh_test_types[dtest_num]->description);
   Exit(0);
 } else {
   Usage();
   Exit(0);
 }

}
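
/*
 * Illustrative sketch only (never compiled): the general shape of the
 * per-test description string referred to in the comment above.  The
 * name "example_dtest_description" is hypothetical; in a real test the
 * string lives in that test's own source file and is stored in its
 * Dtest.description field.
 */
#if 0
static char example_dtest_description[] =
"#==================================================================\n"
"#                Example Test (illustrative only)\n"
"#\n"
"# A short, '#'-prefixed synopsis of what the test measures, plus any\n"
"# recommended -t/-p settings, goes here so it can be printed by -h\n"
"# and filtered out of normal results output.\n"
"#==================================================================\n";
#endif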
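
/*
 * Usage(): print the dieharder banner (dh_header()) and the full option
 * summary below to stdout, then exit.
 */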
void Usage()
{

 dh_header();

 fprintf(stdout, "\n\
Usage:\n\
\n\
dieharder [-a] [-d dieharder test number] [-f filename] [-B]\n\
          [-D output flag [-D output flag] ... ] [-F] [-c separator]\n\
          [-g generator number or -1] [-h] [-k ks_flag] [-l] \n\
          [-L overlap] [-m multiply_p] [-n ntuple] \n\
          [-p number of p samples] [-P Xoff]\n\
          [-o filename] [-s seed strategy] [-S random number seed]\n\
          [-t number of test samples] [-v verbose flag]\n\
          [-W weak] [-X fail] [-Y Xtrategy]\n\
          [-x xvalue] [-y yvalue] [-z zvalue]\n");
fprintf(stdout, "\n\
  -a - runs all the tests with standard/default options to create a report\n\
  -d test number - selects a specific dieharder test.\n\
  -f filename - generators 201 or 202 permit either raw binary or \n\
     formatted ASCII numbers to be read in from a file for testing.\n\
     Generator 200 reads in raw binary numbers from stdin.\n\
     Note well: many tests with default parameters require a lot of rands!\n\
     To see a sample of the (required) header for ASCII formatted input, run\n\
\n\
         dieharder -o -f example.input -t 10\n\
\n\
     and then examine the contents of example.input.\n\
     Raw binary input reads 32 bit increments of the specified data stream.\n\
     stdin_input_raw accepts a pipe from a raw binary stream.\n\
  -B binary output (used with -o)\n\
  -D output flag - permits fields to be selected for inclusion in dieharder\n\
     output.  Each flag can be entered either as a binary number that turns\n\
     on a specific output field or header, or by flag name; flags are\n\
     aggregated.  To see all currently known flags use the -F command.\n\
  -F - lists all known flags by name and number.\n\
  -c table separator - where separator is e.g. ',' (CSV) or ' ' (whitespace).\n\
  -g generator number - selects a specific generator for testing.  Using\n\
     -1 causes all known generators to be printed out to the display.\n\
  -h prints context-sensitive help -- usually Usage (this message) or a\n\
     test synopsis if entered as e.g. dieharder -d 3 -h.\n\
  -k ks_flag - selects the KS test variant:\n\
\n\
     0 is fast but slightly sloppy for psamples > 4999 (default).\n\
\n\
     1 is MUCH slower but more accurate for larger numbers of psamples.\n\
\n\
     2 is very slow and accurate to machine precision.\n\
\n\
     3 is Kuiper KS, fast, quite inaccurate for small samples, deprecated.\n\
\n\
  -l list all known tests.\n\
  -L overlap \n\
\n\
     1 (use overlap, default) \n\
\n\
     0 (don't use overlap) \n\
\n\
     in operm5 or other tests that support overlapping and non-overlapping \n\
     sample modes. \n\
  -m multiply_p - multiply default # of psamples in -a(ll) runs to crank\n\
     up the resolution of failure.\n\
  -n ntuple - set ntuple length for tests on short bit strings that permit\n\
     the length to be varied (e.g. rgb bitdist).\n\
  -o filename - output -t count random numbers from current generator to file.\n\
  -p count - sets the number of p-value samples per test (default 100).\n\
  -P Xoff - sets the number of psamples that will accumulate before deciding \n\
     that a generator is 'good' and really, truly passes even a -Y 2 T2D run. \n\
     Currently the default is 100000; eventually it will be set from \n\
     AES-derived T2D test failure thresholds for fully automated reliable \n\
     operation, but for now it is more a 'boredom' threshold set by how long \n\
     one might reasonably want to wait on any given test run. \n\
  -S seed - where seed is a uint.  Overrides the default random seed\n\
     selection.  Ignored for file or stdin input.\n\
  -s strategy - if strategy is the (default) 0, dieharder reseeds (or\n\
     rewinds) once at the beginning when the random number generator is\n\
     selected and then never again.  If strategy is nonzero, the generator\n\
     is reseeded or rewound at the beginning of EACH TEST.  If -S seed was\n\
     specified, or a file is used, this means every test is applied to the\n\
     same sequence (which is useful for validation and testing of dieharder,\n\
     but not a good way to test rngs).  Otherwise a new random seed is\n\
     selected for each test.\n\
  -t count - sets the number of random entities used in each test, where\n\
     possible.  Be warned -- some tests will take a long time with the\n\
     default value of 10000.  Read the test synopses for suggested settings\n\
     for -t or use -a first.  Many tests will ignore -t as they require\n\
     a very specific number of samples to be used in generating their\n\
     statistic.\n");
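
/*
 * A few illustrative command lines for the options documented above.
 * This is a sketch only: file names are hypothetical, and the numeric
 * -D values are placeholders (run `dieharder -F` for the real flag table):
 *
 *   cat random.bin | dieharder -g 200 -a      raw binary rands piped to stdin
 *   dieharder -g 202 -f example.input -a      rands read from a file (201/202; see -f above)
 *   dieharder -a -S 1 -s 1                    reseed to the same seed before EACH test
 *   dieharder -a -D 8 -D 16                   aggregate two output-field flags by number
 */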
fprintf(stdout, "  -W weak - sets the 'weak' threshold to make the test(s) more or less \n\
     forgiving during e.g. a test-to-destruction run.  Default is currently \n\
     0.005. \n\
  -X fail - sets the 'fail' threshold to make the test(s) more or less \n\
     forgiving during e.g. a test-to-destruction run.  Default is currently \n\
     0.000001, which is basically 'certain failure of the null hypothesis', \n\
     the desired mode of reproducible generator failure. \n\
  -Y Xtrategy - the Xtrategy flag controls the new 'test to failure' (T2F)\n\
     modes.  Its values act as follows:\n\
\n\
     0 - just run dieharder with the specified number of tsamples and\n\
     psamples, and do not dynamically modify the run based on results.  This is\n\
     the way it has always run, and is still the default.\n\
\n\
     1 - 'resolve ambiguity' (RA) mode.  If a test returns 'weak', this is \n\
     an undesired result.  What does that mean, after all?  If you run a long\n\
     test series, you will see occasional weak returns for a perfect\n\
     generator because p is uniformly distributed and will appear in any\n\
     finite interval from time to time.  Even if a test run returns more than\n\
     one weak result, you cannot be certain that the generator is failing.\n\
     RA mode adds psamples (usually in blocks of 100) until the\n\
     test result ends up solidly not weak or proceeds to unambiguous failure.\n\
     This is morally equivalent to running the test several times to see if a\n\
     weak result is reproducible, but eliminates the bias of personal\n\
     judgement in the process since the default failure threshold is very\n\
     small and very unlikely to be reached by random chance even in many\n\
     runs.\n\
\n\
     This option should only be used with -k 2.\n\
\n\
     2 - 'test to destruction' (T2D) mode.  Sometimes you just want to know\n\
     where or if a generator will ever fail a test (or test series).  -Y 2\n\
     causes psamples to be added 100 at a time until a test returns an\n\
     overall pvalue lower than the failure threshold or a specified maximum\n\
     number of psamples (see -P) is reached.  \n");
fprintf(stdout, "\n\
     Note well!  In this mode one may well fail due to the alternate\n\
     null hypothesis -- the test itself is a bad test and fails!  Many\n\
     dieharder tests, despite our best efforts, are numerically unstable or\n\
     have only approximately known target statistics or are straight up\n\
     asymptotic results, and will eventually return a failing result even for\n\
     a gold-standard generator (such as AES), or, for the hypercautious, the\n\
     XOR generator with AES, threefish, kiss, all loaded at once and xor'd\n\
     together.  It is therefore safest to use this mode comparatively,\n\
     executing a T2D run on AES to get an idea of the test failure\n\
     threshold(s) (something I will eventually do and publish on the web so\n\
     everybody doesn't have to do it independently) and then running it on\n\
     your target generator.  Failure with numbers of psamples within an order\n\
     of magnitude of the AES thresholds should probably be considered\n\
     possible test failures, not generator failures.  Failures at levels\n\
     significantly less than the known gold standard generator failure\n\
     thresholds are, of course, probably failures of the generator.\n\
\n\
     This option should only be used with -k 2.\n\
\n\
  -v verbose flag -- controls the verbosity of the output for debugging\n\
     only.  Probably of little use to non-developers, and developers can\n\
     read the enum(s) in dieharder.h and the test sources to see which\n\
     flag values turn on output on which routines.  1 is 'all' and will\n\
     result in a highly detailed trace of program activity.\n\
\n\
  -x,-y,-z number - Some tests have parameters that can safely be varied\n\
     from their default value.  For example, in the diehard birthdays test,\n\
     one can vary the number of 'dates' drawn from the 'year' of some\n\
     length, which can also be varied.  -x 2048 -y 30 alters these two values\n\
     but should still run fine.  These parameters should be documented\n\
     internally (where they exist) in the notes visible via e.g. -d 0 -h.\n\
\n\
  NOTE WELL:  The assessment(s) for the rngs may, in fact, be completely\n\
  incorrect or misleading.  In particular, 'Weak' pvalues should occur\n\
  one test in a hundred, and 'Failed' pvalues should occur one test in\n\
  a thousand -- that's what p MEANS.  Use them at your Own Risk!  Be Warned!\n\
\n");
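
/*
 * Illustrative invocations of the T2F and test-parameter options above.
 * A sketch only: the generator under test is whatever you are probing,
 * and -P 100000 is just the documented default cap:
 *
 *   dieharder -d 0 -Y 2 -k 2 -P 100000    test-to-destruction run of test 0
 *   dieharder -d 0 -Y 1 -k 2              resolve-ambiguity run of test 0
 *   dieharder -d 0 -x 2048 -y 30          diehard birthdays with altered parameters
 */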

 exit(0);

}

/* Cruft
 * This is a standard way of putting out a test description
void help_test(Dtest *dtest)
{

 printf("%s",dtest->description);

}
 */