1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
22 //
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
26 //
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42
43 /* ////////////////////////////////////////////////////////////////////
44 //
45 // Filling CvMat/IplImage instances with random numbers
46 //
47 // */
48
49 #include "precomp.hpp"
50
51 namespace cv
52 {
53
54 ///////////////////////////// Functions Declaration //////////////////////////////////////
55
/*
   Multiply-with-carry generator is used here:
   temp = ( A*X(n) + carry )
   X(n+1) = temp mod (2^32)
   carry = temp / (2^32)

   The 64-bit state word packs X(n) in its low 32 bits and the carry in
   its high 32 bits; CV_RNG_COEFF is the multiplier A.
*/

// Advance the MWC state by one step: new state = A*low32(x) + high32(x).
#define RNG_NEXT(x)    ((uint64)(unsigned)(x)*CV_RNG_COEFF + ((x) >> 32))
64
65 /***************************************************************************************\
66 * Pseudo-Random Number Generators (PRNGs) *
67 \***************************************************************************************/
68
69 template<typename T> static void
randBits_(T * arr,int len,uint64 * state,const Vec2i * p,bool small_flag)70 randBits_( T* arr, int len, uint64* state, const Vec2i* p, bool small_flag )
71 {
72 uint64 temp = *state;
73 int i;
74
75 if( !small_flag )
76 {
77 for( i = 0; i <= len - 4; i += 4 )
78 {
79 int t0, t1;
80
81 temp = RNG_NEXT(temp);
82 t0 = ((int)temp & p[i][0]) + p[i][1];
83 temp = RNG_NEXT(temp);
84 t1 = ((int)temp & p[i+1][0]) + p[i+1][1];
85 arr[i] = saturate_cast<T>(t0);
86 arr[i+1] = saturate_cast<T>(t1);
87
88 temp = RNG_NEXT(temp);
89 t0 = ((int)temp & p[i+2][0]) + p[i+2][1];
90 temp = RNG_NEXT(temp);
91 t1 = ((int)temp & p[i+3][0]) + p[i+3][1];
92 arr[i+2] = saturate_cast<T>(t0);
93 arr[i+3] = saturate_cast<T>(t1);
94 }
95 }
96 else
97 {
98 for( i = 0; i <= len - 4; i += 4 )
99 {
100 int t0, t1, t;
101 temp = RNG_NEXT(temp);
102 t = (int)temp;
103 t0 = (t & p[i][0]) + p[i][1];
104 t1 = ((t >> 8) & p[i+1][0]) + p[i+1][1];
105 arr[i] = saturate_cast<T>(t0);
106 arr[i+1] = saturate_cast<T>(t1);
107
108 t0 = ((t >> 16) & p[i+2][0]) + p[i+2][1];
109 t1 = ((t >> 24) & p[i+3][0]) + p[i+3][1];
110 arr[i+2] = saturate_cast<T>(t0);
111 arr[i+3] = saturate_cast<T>(t1);
112 }
113 }
114
115 for( ; i < len; i++ )
116 {
117 int t0;
118 temp = RNG_NEXT(temp);
119
120 t0 = ((int)temp & p[i][0]) + p[i][1];
121 arr[i] = saturate_cast<T>(t0);
122 }
123
124 *state = temp;
125 }
126
// Precomputed constants that let randi_() evaluate "x % d" with a
// multiply and two shifts instead of a hardware division; M, sh1 and sh2
// are derived in RNG::fill() from the range length d.
struct DivStruct
{
    unsigned d;     // divisor (length of the target range)
    unsigned M;     // magic multiplier for the division by d
    int sh1, sh2;   // post-multiply shift amounts
    int delta;      // lower bound of the range, added after the reduction
};
134
135 template<typename T> static void
randi_(T * arr,int len,uint64 * state,const DivStruct * p)136 randi_( T* arr, int len, uint64* state, const DivStruct* p )
137 {
138 uint64 temp = *state;
139 for( int i = 0; i < len; i++ )
140 {
141 temp = RNG_NEXT(temp);
142 unsigned t = (unsigned)temp;
143 unsigned v = (unsigned)(((uint64)t * p[i].M) >> 32);
144 v = (v + ((t - v) >> p[i].sh1)) >> p[i].sh2;
145 v = t - v*p[i].d + p[i].delta;
146 arr[i] = saturate_cast<T>((int)v);
147 }
148 *state = temp;
149 }
150
151
// Generates the per-depth entry points randBits_<suffix> / randi_<suffix>
// with the common signature expected by the RandFunc table below. The
// unused void* parameter keeps the signature compatible with randf_16f,
// which needs a scratch float buffer in that slot.
#define DEF_RANDI_FUNC(suffix, type) \
static void randBits_##suffix(type* arr, int len, uint64* state, \
                              const Vec2i* p, void*, bool small_flag) \
{ randBits_(arr, len, state, p, small_flag); } \
\
static void randi_##suffix(type* arr, int len, uint64* state, \
                           const DivStruct* p, void*, bool ) \
{ randi_(arr, len, state, p); }

DEF_RANDI_FUNC(8u, uchar)
DEF_RANDI_FUNC(8s, schar)
DEF_RANDI_FUNC(16u, ushort)
DEF_RANDI_FUNC(16s, short)
DEF_RANDI_FUNC(32s, int)
166
randf_32f(float * arr,int len,uint64 * state,const Vec2f * p,void *,bool)167 static void randf_32f( float* arr, int len, uint64* state, const Vec2f* p, void*, bool )
168 {
169 uint64 temp = *state;
170 for( int i = 0; i < len; i++ )
171 {
172 int t = (int)(temp = RNG_NEXT(temp));
173 arr[i] = (float)(t*p[i][0]);
174 }
175 *state = temp;
176
177 // add bias separately to make the generated random numbers
178 // more deterministic, independent of
179 // architecture details (FMA instruction use etc.)
180 hal::addRNGBias32f(arr, &p[0][0], len);
181 }
182
183 static void
randf_64f(double * arr,int len,uint64 * state,const Vec2d * p,void *,bool)184 randf_64f( double* arr, int len, uint64* state, const Vec2d* p, void*, bool )
185 {
186 uint64 temp = *state;
187 for( int i = 0; i < len; i++ )
188 {
189 temp = RNG_NEXT(temp);
190 int64 v = (temp >> 32)|(temp << 32);
191 arr[i] = v*p[i][0];
192 }
193 *state = temp;
194
195 hal::addRNGBias64f(arr, &p[0][0], len);
196 }
197
randf_16f(float16_t * arr,int len,uint64 * state,const Vec2f * p,float * fbuf,bool)198 static void randf_16f( float16_t* arr, int len, uint64* state, const Vec2f* p, float* fbuf, bool )
199 {
200 uint64 temp = *state;
201 for( int i = 0; i < len; i++ )
202 {
203 float f = (float)(int)(temp = RNG_NEXT(temp));
204 fbuf[i] = f*p[i][0];
205 }
206 *state = temp;
207
208 // add bias separately to make the generated random numbers
209 // more deterministic, independent of
210 // architecture details (FMA instruction use etc.)
211 hal::addRNGBias32f(fbuf, &p[0][0], len);
212 hal::cvt32f16f(fbuf, arr, len);
213 }
214
// Common type-erased signature for all generators above; the per-type
// functions are cast to it, so the table must be indexed consistently
// with the destination element type (see RNG::fill).
typedef void (*RandFunc)(uchar* arr, int len, uint64* state, const void* p, void* tempbuf, bool small_flag);


// randTab[0][depth]: general "division" generators (randi_* / randf_*).
// randTab[1][depth]: fast "bit-mask" generators (randBits_*), usable only
// when every channel range length is a power of two (fast_int_mode);
// there are no bit-mask variants for the floating-point depths.
static RandFunc randTab[][8] =
{
    {
        (RandFunc)randi_8u, (RandFunc)randi_8s, (RandFunc)randi_16u, (RandFunc)randi_16s,
        (RandFunc)randi_32s, (RandFunc)randf_32f, (RandFunc)randf_64f, (RandFunc)randf_16f
    },
    {
        (RandFunc)randBits_8u, (RandFunc)randBits_8s, (RandFunc)randBits_16u, (RandFunc)randBits_16s,
        (RandFunc)randBits_32s, 0, 0, 0
    }
};
229
230 /*
231 The code below implements the algorithm described in
232 "The Ziggurat Method for Generating Random Variables"
233 by George Marsaglia and Wai Wan Tsang, Journal of Statistical Software, 2007.
234 */
/*
  Generates len samples from the standard normal distribution N(0, 1)
  into arr, advancing *state. Implements the 128-strip Ziggurat method;
  the tables kn (strip acceptance thresholds), wn (strip widths) and
  fn (density values) are built lazily on first call.

  NOTE(review): the lazy initialization guarded by `initialized` is not
  synchronized; concurrent first calls from multiple threads could race
  on the table setup — confirm callers serialize the first use.
*/
static void
randn_0_1_32f( float* arr, int len, uint64* state )
{
    const float r = 3.442620f; // The start of the right tail
    const float rng_flt = 2.3283064365386962890625e-10f; // 2^-32
    static unsigned kn[128];
    static float wn[128], fn[128];
    uint64 temp = *state;
    static bool initialized=false;
    int i;

    if( !initialized )
    {
        const double m1 = 2147483648.0;  // 2^31: scale to the signed 32-bit draw
        double dn = 3.442619855899, tn = dn, vn = 9.91256303526217e-3;

        // Set up the tables
        double q = vn/std::exp(-.5*dn*dn);
        kn[0] = (unsigned)((dn/q)*m1);
        kn[1] = 0;

        wn[0] = (float)(q/m1);
        wn[127] = (float)(dn/m1);

        fn[0] = 1.f;
        fn[127] = (float)std::exp(-.5*dn*dn);

        // Fill the remaining strips from the top down.
        for(i=126;i>=1;i--)
        {
            dn = std::sqrt(-2.*std::log(vn/dn+std::exp(-.5*dn*dn)));
            kn[i+1] = (unsigned)((dn/tn)*m1);
            tn = dn;
            fn[i] = (float)std::exp(-.5*dn*dn);
            wn[i] = (float)(dn/m1);
        }
        initialized = true;
    }

    for( i = 0; i < len; i++ )
    {
        float x, y;
        for(;;)
        {
            // hz: signed 32-bit draw; low 7 bits select the strip.
            int hz = (int)temp;
            temp = RNG_NEXT(temp);
            int iz = hz & 127;
            x = hz*wn[iz];
            // Fast path: the draw falls inside the strip's rectangle.
            if( (unsigned)std::abs(hz) < kn[iz] )
                break;
            if( iz == 0) // iz==0, handles the base strip
            {
                // Sample the tail (x > r) by the exponential rejection method.
                do
                {
                    x = (unsigned)temp*rng_flt;
                    temp = RNG_NEXT(temp);
                    y = (unsigned)temp*rng_flt;
                    temp = RNG_NEXT(temp);
                    x = (float)(-std::log(x+FLT_MIN)*0.2904764);
                    y = (float)-std::log(y+FLT_MIN);
                } // .2904764 is 1/r
                while( y + y < x*x );
                x = hz > 0 ? r + x : -r - x;
                break;
            }
            // iz > 0, handle the wedges of other strips
            y = (unsigned)temp*rng_flt;
            temp = RNG_NEXT(temp);
            if( fn[iz] + y*(fn[iz - 1] - fn[iz]) < std::exp(-.5*x*x) )
                break;
        }
        arr[i] = x;
    }
    *state = temp;
}
309
310
gaussian(double sigma)311 double RNG::gaussian(double sigma)
312 {
313 float temp;
314 randn_0_1_32f( &temp, 1, &state );
315 return temp*sigma;
316 }
317
318
319 template<typename T, typename PT> static void
randnScale_(const float * src,T * dst,int len,int cn,const PT * mean,const PT * stddev,bool stdmtx)320 randnScale_( const float* src, T* dst, int len, int cn, const PT* mean, const PT* stddev, bool stdmtx )
321 {
322 int i, j, k;
323 if( !stdmtx )
324 {
325 if( cn == 1 )
326 {
327 PT b = mean[0], a = stddev[0];
328 for( i = 0; i < len; i++ )
329 dst[i] = saturate_cast<T>(src[i]*a + b);
330 }
331 else
332 {
333 for( i = 0; i < len; i++, src += cn, dst += cn )
334 for( k = 0; k < cn; k++ )
335 dst[k] = saturate_cast<T>(src[k]*stddev[k] + mean[k]);
336 }
337 }
338 else
339 {
340 for( i = 0; i < len; i++, src += cn, dst += cn )
341 {
342 for( j = 0; j < cn; j++ )
343 {
344 PT s = mean[j];
345 for( k = 0; k < cn; k++ )
346 s += src[k]*stddev[j*cn + k];
347 dst[j] = saturate_cast<T>(s);
348 }
349 }
350 }
351 }
352
// Thin per-type instantiations of randnScale_ with the signature expected
// by the RandnScaleFunc table below. mean/stddev are float for all depths
// except CV_64F, which keeps them in double precision.
static void randnScale_8u( const float* src, uchar* dst, int len, int cn,
                           const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }

static void randnScale_8s( const float* src, schar* dst, int len, int cn,
                           const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }

static void randnScale_16u( const float* src, ushort* dst, int len, int cn,
                            const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }

static void randnScale_16s( const float* src, short* dst, int len, int cn,
                            const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }

static void randnScale_32s( const float* src, int* dst, int len, int cn,
                            const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }

static void randnScale_32f( const float* src, float* dst, int len, int cn,
                            const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }

static void randnScale_64f( const float* src, double* dst, int len, int cn,
                            const double* mean, const double* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
380
// Type-erased signature the per-type scalers are cast to; indexed by
// matrix depth in RNG::fill. The last (CV_16F) slot is 0 — the normal
// distribution path does not support half floats (guarded by the
// CV_Assert(scaleFunc != 0) in RNG::fill).
typedef void (*RandnScaleFunc)(const float* src, uchar* dst, int len, int cn,
                               const uchar*, const uchar*, bool);

static RandnScaleFunc randnScaleTab[] =
{
    (RandnScaleFunc)randnScale_8u, (RandnScaleFunc)randnScale_8s, (RandnScaleFunc)randnScale_16u,
    (RandnScaleFunc)randnScale_16s, (RandnScaleFunc)randnScale_32s, (RandnScaleFunc)randnScale_32f,
    (RandnScaleFunc)randnScale_64f, 0
};
390
/*
  Fills the matrix with random numbers.
    disttype == UNIFORM (CV_RAND_UNI):   values in [param1, param2) per channel.
    disttype == NORMAL (CV_RAND_NORMAL): param1 = mean, param2 = stddev
      (stddev may also be a cn x cn transform matrix).
  saturateRange (uniform, integer depths only) clips the requested range
  to the representable range of the destination type.
*/
void RNG::fill( InputOutputArray _mat, int disttype,
                InputArray _param1arg, InputArray _param2arg, bool saturateRange )
{
    CV_Assert(!_mat.empty());

    Mat mat = _mat.getMat(), _param1 = _param1arg.getMat(), _param2 = _param2arg.getMat();
    int depth = mat.depth(), cn = mat.channels();
    AutoBuffer<double> _parambuf;
    int j, k;
    bool fast_int_mode = false;
    bool smallFlag = true;
    RandFunc func = 0;
    RandnScaleFunc scaleFunc = 0;

    // Each parameter must be a single-channel vector with either 1 or cn
    // entries, or the 4x1 CV_64F layout produced by a Scalar.
    CV_Assert(_param1.channels() == 1 && (_param1.rows == 1 || _param1.cols == 1) &&
              (_param1.rows + _param1.cols - 1 == cn || _param1.rows + _param1.cols - 1 == 1 ||
               (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4)));
    // NOTE(review): the Scalar-layout clause below tests _param1, not
    // _param2 — looks like a copy-paste slip, but it is long-standing
    // upstream behavior; confirm before tightening.
    CV_Assert( _param2.channels() == 1 &&
              (((_param2.rows == 1 || _param2.cols == 1) &&
               (_param2.rows + _param2.cols - 1 == cn || _param2.rows + _param2.cols - 1 == 1 ||
               (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))) ||
               (_param2.rows == cn && _param2.cols == cn && disttype == NORMAL)));

    Vec2i* ip = 0;       // uniform int ranges: (mask/length-1, lower bound)
    Vec2d* dp = 0;       // uniform double: (scale, bias)
    Vec2f* fp = 0;       // uniform float/half: (scale, bias)
    DivStruct* ds = 0;   // uniform int, general case: division constants
    uchar* mean = 0;     // normal: per-channel means (float or double)
    uchar* stddev = 0;   // normal: per-channel stddevs or cn x cn matrix
    bool stdmtx = false;
    int n1 = (int)_param1.total();
    int n2 = (int)_param2.total();

    if( disttype == UNIFORM )
    {
        // parambuf layout: p1 (cn doubles), p2 (cn doubles), then the
        // per-channel generator parameters (ip/ds or fp/dp).
        _parambuf.allocate(cn*8 + n1 + n2);
        double* parambuf = _parambuf.data();
        double* p1 = _param1.ptr<double>();
        double* p2 = _param2.ptr<double>();

        // Normalize param1 to cn contiguous doubles, replicating a short
        // parameter vector across channels.
        if( !_param1.isContinuous() || _param1.type() != CV_64F || n1 != cn )
        {
            Mat tmp(_param1.size(), CV_64F, parambuf);
            _param1.convertTo(tmp, CV_64F);
            p1 = parambuf;
            if( n1 < cn )
                for( j = n1; j < cn; j++ )
                    p1[j] = p1[j-n1];
        }

        // Same for param2.
        if( !_param2.isContinuous() || _param2.type() != CV_64F || n2 != cn )
        {
            Mat tmp(_param2.size(), CV_64F, parambuf + cn);
            _param2.convertTo(tmp, CV_64F);
            p2 = parambuf + cn;
            if( n2 < cn )
                for( j = n2; j < cn; j++ )
                    p2[j] = p2[j-n2];
        }

        if( depth <= CV_32S )
        {
            // Integer destination: compute per-channel (length-1, lower)
            // pairs and decide between the bit-mask and division paths.
            ip = (Vec2i*)(parambuf + cn*2);
            for( j = 0, fast_int_mode = true; j < cn; j++ )
            {
                double a = std::min(p1[j], p2[j]);
                double b = std::max(p1[j], p2[j]);
                if( saturateRange )
                {
                    // Clip the requested range to the destination type.
                    a = std::max(a, depth == CV_8U || depth == CV_16U ? 0. :
                            depth == CV_8S ? -128. : depth == CV_16S ? -32768. : (double)INT_MIN);
                    b = std::min(b, depth == CV_8U ? 256. : depth == CV_16U ? 65536. :
                            depth == CV_8S ? 128. : depth == CV_16S ? 32768. : (double)INT_MAX);
                }
                ip[j][1] = cvCeil(a);
                int idiff = ip[j][0] = cvFloor(b) - ip[j][1] - 1;
                if (idiff < 0)
                {
                    idiff = 0;
                    ip[j][0] = 0;
                }
                double diff = b - a;

                // Bit-mask path only if every range length is a power of two.
                fast_int_mode = fast_int_mode && diff <= 4294967296. && (idiff & (idiff+1)) == 0;
                if( fast_int_mode )
                    smallFlag = smallFlag && (idiff <= 255);
                else
                {
                    if( diff > INT_MAX )
                        ip[j][0] = INT_MAX;
                    if( a < INT_MIN/2 )
                        ip[j][1] = INT_MIN/2;
                }
            }

            if( !fast_int_mode )
            {
                // Precompute the multiply/shift constants that let randi_
                // reduce a raw draw modulo d without a hardware division.
                ds = (DivStruct*)(ip + cn);
                for( j = 0; j < cn; j++ )
                {
                    ds[j].delta = ip[j][1];
                    unsigned d = ds[j].d = (unsigned)(ip[j][0]+1);
                    int l = 0;
                    while(((uint64)1 << l) < d)
                        l++;
                    ds[j].M = (unsigned)(((uint64)1 << 32)*(((uint64)1 << l) - d)/d) + 1;
                    ds[j].sh1 = std::min(l, 1);
                    ds[j].sh2 = std::max(l - 1, 0);
                }
            }

            func = randTab[fast_int_mode ? 1 : 0][depth];
        }
        else
        {
            double scale = depth == CV_64F ?
                5.4210108624275221700372640043497e-20 :  // 2**-64
                2.3283064365386962890625e-10;            // 2**-32
            double maxdiff = saturateRange ? (double)FLT_MAX : DBL_MAX;

            // for each channel i compute such dparam[0][i] & dparam[1][i],
            // so that a signed 32/64-bit integer X is transformed to
            // the range [param1.val[i], param2.val[i]) using
            // dparam[0][i]*X + dparam[1][i]
            if( depth != CV_64F )
            {
                fp = (Vec2f*)(parambuf + cn*2);
                for( j = 0; j < cn; j++ )
                {
                    fp[j][0] = (float)(std::min(maxdiff, p2[j] - p1[j])*scale);
                    fp[j][1] = (float)((p2[j] + p1[j])*0.5);
                }
            }
            else
            {
                dp = (Vec2d*)(parambuf + cn*2);
                for( j = 0; j < cn; j++ )
                {
                    dp[j][0] = std::min(DBL_MAX, p2[j] - p1[j])*scale;
                    dp[j][1] = ((p2[j] + p1[j])*0.5);
                }
            }

            func = randTab[0][depth];
        }
        CV_Assert( func != 0 );
    }
    else if( disttype == CV_RAND_NORMAL )
    {
        _parambuf.allocate(MAX(n1, cn) + MAX(n2, cn));
        double* parambuf = _parambuf.data();

        // Mean/stddev are kept in float except for CV_64F destinations.
        int ptype = depth == CV_64F ? CV_64F : CV_32F;
        int esz = (int)CV_ELEM_SIZE(ptype);

        if( _param1.isContinuous() && _param1.type() == ptype && n1 >= cn)
            mean = _param1.ptr();
        else
        {
            Mat tmp(_param1.size(), ptype, parambuf);
            _param1.convertTo(tmp, ptype);
            mean = (uchar*)parambuf;
        }

        // Replicate a short mean vector across all channels (byte-wise).
        if( n1 < cn )
            for( j = n1*esz; j < cn*esz; j++ )
                mean[j] = mean[j - n1*esz];

        if( _param2.isContinuous() && _param2.type() == ptype && n2 >= cn)
            stddev = _param2.ptr();
        else
        {
            Mat tmp(_param2.size(), ptype, parambuf + MAX(n1, cn));
            _param2.convertTo(tmp, ptype);
            stddev = (uchar*)(parambuf + MAX(n1, cn));
        }

        if( n2 < cn )
            for( j = n2*esz; j < cn*esz; j++ )
                stddev[j] = stddev[j - n2*esz];

        // cn x cn param2 means a full transform matrix, not per-channel stddev.
        stdmtx = _param2.rows == cn && _param2.cols == cn;
        scaleFunc = randnScaleTab[depth];
        CV_Assert( scaleFunc != 0 );
    }
    else
        CV_Error( CV_StsBadArg, "Unknown distribution type" );

    // Process the matrix plane by plane, blockSize elements at a time.
    const Mat* arrays[] = {&mat, 0};
    uchar* ptr;
    NAryMatIterator it(arrays, &ptr, 1);
    int total = (int)it.size, blockSize = std::min((BLOCK_SIZE + cn - 1)/cn, total);
    size_t esz = mat.elemSize();
    AutoBuffer<double> buf;
    uchar* param = 0;
    float* nbuf = 0;
    float* tmpbuf = 0;

    if( disttype == UNIFORM )
    {
        // Replicate the cn per-channel parameter records across a whole
        // block, so the generators can index parameters with the sample index.
        buf.allocate(blockSize*cn*4);
        param = (uchar*)(double*)buf.data();

        if( depth <= CV_32S )
        {
            if( !fast_int_mode )
            {
                DivStruct* p = (DivStruct*)param;
                for( j = 0; j < blockSize*cn; j += cn )
                    for( k = 0; k < cn; k++ )
                        p[j + k] = ds[k];
            }
            else
            {
                Vec2i* p = (Vec2i*)param;
                for( j = 0; j < blockSize*cn; j += cn )
                    for( k = 0; k < cn; k++ )
                        p[j + k] = ip[k];
            }
        }
        else if( depth != CV_64F )
        {
            Vec2f* p = (Vec2f*)param;
            for( j = 0; j < blockSize*cn; j += cn )
                for( k = 0; k < cn; k++ )
                    p[j + k] = fp[k];
            // CV_16F needs a scratch float buffer; it lives right after
            // the replicated parameters.
            if( depth == CV_16F )
                tmpbuf = (float*)p + blockSize*cn*2;
        }
        else
        {
            Vec2d* p = (Vec2d*)param;
            for( j = 0; j < blockSize*cn; j += cn )
                for( k = 0; k < cn; k++ )
                    p[j + k] = dp[k];
        }
    }
    else
    {
        // Normal path: scratch buffer for the raw N(0,1) floats.
        buf.allocate((blockSize*cn+1)/2);
        nbuf = (float*)(double*)buf.data();
    }

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( j = 0; j < total; j += blockSize )
        {
            int len = std::min(total - j, blockSize);

            if( disttype == CV_RAND_UNI )
                func( ptr, len*cn, &state, param, tmpbuf, smallFlag );
            else
            {
                randn_0_1_32f(nbuf, len*cn, &state);
                scaleFunc(nbuf, ptr, len, cn, mean, stddev, stdmtx);
            }
            ptr += len*esz;
        }
    }
}
651
652 }
653
theRNG()654 cv::RNG& cv::theRNG()
655 {
656 return getCoreTlsData().rng;
657 }
658
setRNGSeed(int seed)659 void cv::setRNGSeed(int seed)
660 {
661 theRNG() = RNG(static_cast<uint64>(seed));
662 }
663
664
randu(InputOutputArray dst,InputArray low,InputArray high)665 void cv::randu(InputOutputArray dst, InputArray low, InputArray high)
666 {
667 CV_INSTRUMENT_REGION();
668
669 theRNG().fill(dst, RNG::UNIFORM, low, high);
670 }
671
randn(InputOutputArray dst,InputArray mean,InputArray stddev)672 void cv::randn(InputOutputArray dst, InputArray mean, InputArray stddev)
673 {
674 CV_INSTRUMENT_REGION();
675
676 theRNG().fill(dst, RNG::NORMAL, mean, stddev);
677 }
678
679 namespace cv
680 {
681
682 template<typename T> static void
randShuffle_(Mat & _arr,RNG & rng,double)683 randShuffle_( Mat& _arr, RNG& rng, double )
684 {
685 unsigned sz = (unsigned)_arr.total();
686 if( _arr.isContinuous() )
687 {
688 T* arr = _arr.ptr<T>();
689 for( unsigned i = 0; i < sz; i++ )
690 {
691 unsigned j = (unsigned)rng % sz;
692 std::swap( arr[j], arr[i] );
693 }
694 }
695 else
696 {
697 CV_Assert( _arr.dims <= 2 );
698 uchar* data = _arr.ptr();
699 size_t step = _arr.step;
700 int rows = _arr.rows;
701 int cols = _arr.cols;
702 for( int i0 = 0; i0 < rows; i0++ )
703 {
704 T* p = _arr.ptr<T>(i0);
705 for( int j0 = 0; j0 < cols; j0++ )
706 {
707 unsigned k1 = (unsigned)rng % sz;
708 int i1 = (int)(k1 / cols);
709 int j1 = (int)(k1 - (unsigned)i1*(unsigned)cols);
710 std::swap( p[j0], ((T*)(data + step*i1))[j1] );
711 }
712 }
713 }
714 }
715
// Shared signature for the type-erased shuffle implementations above.
typedef void (*RandShuffleFunc)( Mat& dst, RNG& rng, double iterFactor );
717
718 }
719
randShuffle(InputOutputArray _dst,double iterFactor,RNG * _rng)720 void cv::randShuffle( InputOutputArray _dst, double iterFactor, RNG* _rng )
721 {
722 CV_INSTRUMENT_REGION();
723
724 RandShuffleFunc tab[] =
725 {
726 0,
727 randShuffle_<uchar>, // 1
728 randShuffle_<ushort>, // 2
729 randShuffle_<Vec<uchar,3> >, // 3
730 randShuffle_<int>, // 4
731 0,
732 randShuffle_<Vec<ushort,3> >, // 6
733 0,
734 randShuffle_<Vec<int,2> >, // 8
735 0, 0, 0,
736 randShuffle_<Vec<int,3> >, // 12
737 0, 0, 0,
738 randShuffle_<Vec<int,4> >, // 16
739 0, 0, 0, 0, 0, 0, 0,
740 randShuffle_<Vec<int,6> >, // 24
741 0, 0, 0, 0, 0, 0, 0,
742 randShuffle_<Vec<int,8> > // 32
743 };
744
745 Mat dst = _dst.getMat();
746 RNG& rng = _rng ? *_rng : theRNG();
747 CV_Assert( dst.elemSize() <= 32 );
748 RandShuffleFunc func = tab[dst.elemSize()];
749 CV_Assert( func != 0 );
750 func( dst, rng, iterFactor );
751 }
752
753
754 #ifndef OPENCV_EXCLUDE_C_API
755
// Legacy C API: fill a CvMat/IplImage with uniform or normal random values.
CV_IMPL void
cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar param2 )
{
    cv::Mat mat = cv::cvarrToMat(arr);
    // !!! this will only work for current 64-bit MWC RNG !!!
    // The cast below reinterprets the caller's CvRNG (a 64-bit integer) as
    // cv::RNG — presumably layout-compatible because RNG wraps a single
    // 64-bit state word; confirm if the RNG implementation ever changes.
    cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
    rng.fill(mat, disttype == CV_RAND_NORMAL ?
        cv::RNG::NORMAL : cv::RNG::UNIFORM, cv::Scalar(param1), cv::Scalar(param2) );
}
765
// Legacy C API: shuffle the elements of a CvMat/IplImage in place.
// Reinterprets CvRNG as cv::RNG the same way cvRandArr does.
CV_IMPL void cvRandShuffle( CvArr* arr, CvRNG* _rng, double iter_factor )
{
    cv::Mat dst = cv::cvarrToMat(arr);
    cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
    cv::randShuffle( dst, iter_factor, &rng );
}
772
773 #endif // OPENCV_EXCLUDE_C_API
774
775
776 // Mersenne Twister random number generator.
777 // Inspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c
778
779 /*
780 A C-program for MT19937, with initialization improved 2002/1/26.
781 Coded by Takuji Nishimura and Makoto Matsumoto.
782
783 Before using, initialize the state by using init_genrand(seed)
784 or init_by_array(init_key, key_length).
785
786 Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
787 All rights reserved.
788
789 Redistribution and use in source and binary forms, with or without
790 modification, are permitted provided that the following conditions
791 are met:
792
793 1. Redistributions of source code must retain the above copyright
794 notice, this list of conditions and the following disclaimer.
795
796 2. Redistributions in binary form must reproduce the above copyright
797 notice, this list of conditions and the following disclaimer in the
798 documentation and/or other materials provided with the distribution.
799
800 3. The names of its contributors may not be used to endorse or promote
801 products derived from this software without specific prior written
802 permission.
803
804 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
805 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
806 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
807 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
808 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
809 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
810 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
811 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
812 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
813 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
814 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
815
816
817 Any feedback is very welcome.
818 http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
819 email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
820 */
821
// Construct with an explicit seed.
cv::RNG_MT19937::RNG_MT19937(unsigned s) { seed(s); }

// Default seed 5489 — the reference MT19937 default (mt19937ar.c).
cv::RNG_MT19937::RNG_MT19937() { seed(5489U); }
825
seed(unsigned s)826 void cv::RNG_MT19937::seed(unsigned s)
827 {
828 state[0]= s;
829 for (mti = 1; mti < N; mti++)
830 {
831 /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
832 state[mti] = (1812433253U * (state[mti - 1] ^ (state[mti - 1] >> 30)) + mti);
833 }
834 }
835
next()836 unsigned cv::RNG_MT19937::next()
837 {
838 /* mag01[x] = x * MATRIX_A for x=0,1 */
839 static unsigned mag01[2] = { 0x0U, /*MATRIX_A*/ 0x9908b0dfU};
840
841 const unsigned UPPER_MASK = 0x80000000U;
842 const unsigned LOWER_MASK = 0x7fffffffU;
843
844 /* generate N words at one time */
845 if (mti >= N)
846 {
847 int kk = 0;
848
849 for (; kk < N - M; ++kk)
850 {
851 unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
852 state[kk] = state[kk + M] ^ (y >> 1) ^ mag01[y & 0x1U];
853 }
854
855 for (; kk < N - 1; ++kk)
856 {
857 unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
858 state[kk] = state[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1U];
859 }
860
861 unsigned y = (state[N - 1] & UPPER_MASK) | (state[0] & LOWER_MASK);
862 state[N - 1] = state[M - 1] ^ (y >> 1) ^ mag01[y & 0x1U];
863
864 mti = 0;
865 }
866
867 unsigned y = state[mti++];
868
869 /* Tempering */
870 y ^= (y >> 11);
871 y ^= (y << 7) & 0x9d2c5680U;
872 y ^= (y << 15) & 0xefc60000U;
873 y ^= (y >> 18);
874
875 return y;
876 }
877
cv::RNG_MT19937::operator unsigned() { return next(); }

cv::RNG_MT19937::operator int() { return (int)next();}

// Float in [0, 1) with 32-bit resolution (divide by 2^32).
cv::RNG_MT19937::operator float() { return next() * (1.f / 4294967296.f); }

// Double in [0, 1) with 53-bit resolution: 27 high bits of one draw and
// 26 of another combined as a*2^26 + b, scaled by 2^-53 (genrand_res53
// from the reference implementation).
cv::RNG_MT19937::operator double()
{
    unsigned a = next() >> 5;
    unsigned b = next() >> 6;
    return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0);
}
890
// Uniform integer in [a, b). NOTE(review): plain modulo, so the result
// carries slight modulo bias, and b must differ from a (b - a == 0 would
// divide by zero) — confirm callers guarantee a < b.
int cv::RNG_MT19937::uniform(int a, int b) { return (int)(next() % (b - a) + a); }

// Uniform float in [a, b), built from the [0, 1) conversion operator.
float cv::RNG_MT19937::uniform(float a, float b) { return ((float)*this)*(b - a) + a; }

// Uniform double in [a, b), built from the 53-bit [0, 1) conversion.
double cv::RNG_MT19937::uniform(double a, double b) { return ((double)*this)*(b - a) + a; }

// Uniform unsigned in [0, b) (modulo-biased; b must be non-zero).
unsigned cv::RNG_MT19937::operator ()(unsigned b) { return next() % b; }

// Raw 32-bit draw.
unsigned cv::RNG_MT19937::operator ()() { return next(); }
900
901 /* End of file. */
902