1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 // License Agreement
10 // For Open Source Computer Vision Library
11 //
12 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
13 // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
14 // Copyright (C) 2014, Itseez Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
22 //
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
26 //
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42
43 /* ////////////////////////////////////////////////////////////////////
44 //
45 // Mat basic operations: Copy, Set
46 //
47 // */
48
49 #include "precomp.hpp"
50 #include "opencl_kernels_core.hpp"
51
52
53 namespace cv
54 {
55
56 template <typename T> static inline
scalarToRawData_(const Scalar & s,T * const buf,const int cn,const int unroll_to)57 void scalarToRawData_(const Scalar& s, T * const buf, const int cn, const int unroll_to)
58 {
59 int i = 0;
60 for(; i < cn; i++)
61 buf[i] = saturate_cast<T>(s.val[i]);
62 for(; i < unroll_to; i++)
63 buf[i] = buf[i-cn];
64 }
65
scalarToRawData(const Scalar & s,void * _buf,int type,int unroll_to)66 void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to)
67 {
68 CV_INSTRUMENT_REGION();
69
70 const int depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
71 CV_Assert(cn <= 4);
72 switch(depth)
73 {
74 case CV_8U:
75 scalarToRawData_<uchar>(s, (uchar*)_buf, cn, unroll_to);
76 break;
77 case CV_8S:
78 scalarToRawData_<schar>(s, (schar*)_buf, cn, unroll_to);
79 break;
80 case CV_16U:
81 scalarToRawData_<ushort>(s, (ushort*)_buf, cn, unroll_to);
82 break;
83 case CV_16S:
84 scalarToRawData_<short>(s, (short*)_buf, cn, unroll_to);
85 break;
86 case CV_32S:
87 scalarToRawData_<int>(s, (int*)_buf, cn, unroll_to);
88 break;
89 case CV_32F:
90 scalarToRawData_<float>(s, (float*)_buf, cn, unroll_to);
91 break;
92 case CV_64F:
93 scalarToRawData_<double>(s, (double*)_buf, cn, unroll_to);
94 break;
95 case CV_16F:
96 scalarToRawData_<float16_t>(s, (float16_t*)_buf, cn, unroll_to);
97 break;
98 default:
99 CV_Error(CV_StsUnsupportedFormat,"");
100 }
101 }
102
convertAndUnrollScalar(const Mat & sc,int buftype,uchar * scbuf,size_t blocksize)103 void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t blocksize )
104 {
105 int scn = (int)sc.total(), cn = CV_MAT_CN(buftype);
106 size_t esz = CV_ELEM_SIZE(buftype);
107 BinaryFunc cvtFn = getConvertFunc(sc.depth(), buftype);
108 CV_Assert(cvtFn);
109 cvtFn(sc.ptr(), 1, 0, 1, scbuf, 1, Size(std::min(cn, scn), 1), 0);
110 // unroll the scalar
111 if( scn < cn )
112 {
113 CV_Assert( scn == 1 );
114 size_t esz1 = CV_ELEM_SIZE1(buftype);
115 for( size_t i = esz1; i < esz; i++ )
116 scbuf[i] = scbuf[i - esz1];
117 }
118 for( size_t i = esz; i < blocksize*esz; i++ )
119 scbuf[i] = scbuf[i - esz];
120 }
121
// Copy src to dst element-wise wherever the 8-bit mask is non-zero.
// Steps are in bytes; 'size' is measured in elements of T.
template<typename T> static void
copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
{
    for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
    {
        const T* src = (const T*)_src;
        T* dst = (T*)_dst;
        int x = 0;
#if CV_ENABLE_UNROLLED
        // Manual 4x unrolling of the per-element masked copy.
        for( ; x <= size.width - 4; x += 4 )
        {
            if( mask[x] )
                dst[x] = src[x];
            if( mask[x+1] )
                dst[x+1] = src[x+1];
            if( mask[x+2] )
                dst[x+2] = src[x+2];
            if( mask[x+3] )
                dst[x+3] = src[x+3];
        }
#endif
        // Scalar tail (or the whole row when unrolling is disabled).
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = src[x];
    }
}
148
// Specialization for 1-byte elements: IPP fast path first, then a SIMD
// select-based masked copy, then a scalar tail.
template<> void
copyMask_<uchar>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
{
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1MR, _src, (int)sstep, _dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)

    for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
    {
        const uchar* src = (const uchar*)_src;
        uchar* dst = (uchar*)_dst;
        int x = 0;
#if CV_SIMD
        {
            v_uint8 v_zero = vx_setzero_u8();

            for( ; x <= size.width - v_uint8::nlanes; x += v_uint8::nlanes )
            {
                // v_nmask is all-ones where the mask is zero, so select keeps
                // the existing dst value there and takes src elsewhere.
                v_uint8 v_src = vx_load(src + x),
                        v_dst = vx_load(dst + x),
                        v_nmask = vx_load(mask + x) == v_zero;

                v_dst = v_select(v_nmask, v_dst, v_src);
                v_store(dst + x, v_dst);
            }
        }
        vx_cleanup();
#endif
        // Scalar tail for the remaining (width % nlanes) elements.
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = src[x];
    }
}
180
// Specialization for 2-byte elements: IPP fast path first, then SIMD where
// one 8-bit mask vector is widened (zipped with itself) to cover two 16-bit
// data vectors, then a scalar tail.
template<> void
copyMask_<ushort>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
{
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_16u_C1MR, (const Ipp16u *)_src, (int)sstep, (Ipp16u *)_dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)

    for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
    {
        const ushort* src = (const ushort*)_src;
        ushort* dst = (ushort*)_dst;
        int x = 0;
#if CV_SIMD
        {
            v_uint8 v_zero = vx_setzero_u8();

            for( ; x <= size.width - v_uint8::nlanes; x += v_uint8::nlanes )
            {
                v_uint16 v_src1 = vx_load(src + x), v_src2 = vx_load(src + x + v_uint16::nlanes),
                         v_dst1 = vx_load(dst + x), v_dst2 = vx_load(dst + x + v_uint16::nlanes);

                // Duplicate each inverted mask byte so it spans a 16-bit lane.
                v_uint8 v_nmask1, v_nmask2;
                v_uint8 v_nmask = vx_load(mask + x) == v_zero;
                v_zip(v_nmask, v_nmask, v_nmask1, v_nmask2);

                // Keep dst where mask was zero, take src elsewhere.
                v_dst1 = v_select(v_reinterpret_as_u16(v_nmask1), v_dst1, v_src1);
                v_dst2 = v_select(v_reinterpret_as_u16(v_nmask2), v_dst2, v_src2);
                v_store(dst + x, v_dst1);
                v_store(dst + x + v_uint16::nlanes, v_dst2);
            }
        }
        vx_cleanup();
#endif
        // Scalar tail.
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = src[x];
    }
}
217
218 static void
copyMaskGeneric(const uchar * _src,size_t sstep,const uchar * mask,size_t mstep,uchar * _dst,size_t dstep,Size size,void * _esz)219 copyMaskGeneric(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size, void* _esz)
220 {
221 size_t k, esz = *(size_t*)_esz;
222 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
223 {
224 const uchar* src = _src;
225 uchar* dst = _dst;
226 int x = 0;
227 for( ; x < size.width; x++, src += esz, dst += esz )
228 {
229 if( !mask[x] )
230 continue;
231 for( k = 0; k < esz; k++ )
232 dst[k] = src[k];
233 }
234 }
235 }
236
237
// Instantiate a masked-copy function named copyMask<suffix> that forwards to
// the element-wise template copyMask_<type>.
#define DEF_COPY_MASK(suffix, type) \
static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
                             uchar* dst, size_t dstep, Size size, void*) \
{ \
    copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
}

#if defined HAVE_IPP
// Like DEF_COPY_MASK, but tries the matching IPP masked-copy routine first and
// falls back to the template only when the IPP call does not succeed.
#define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
                             uchar* dst, size_t dstep, Size size, void*) \
{ \
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_##ippfavor, (const ipptype *)src, (int)sstep, (ipptype *)dst, (int)dstep, ippiSize(size), (const Ipp8u *)mask, (int)mstep) >= 0)\
    copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
}
#else
// Without IPP the two macros are identical.
#define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
                             uchar* dst, size_t dstep, Size size, void*) \
{ \
    copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
}
#endif

#if IPP_VERSION_X100 == 901 // bug in IPP 9.0.1
// Avoid the buggy IPP routines for these two cases.
DEF_COPY_MASK(32sC3, Vec3i)
DEF_COPY_MASK(8uC3, Vec3b)
#else
DEF_COPY_MASK_F(8uC3, Vec3b, 8u_C3MR, Ipp8u)
DEF_COPY_MASK_F(32sC3, Vec3i, 32s_C3MR, Ipp32s)
#endif
DEF_COPY_MASK(8u, uchar)
DEF_COPY_MASK(16u, ushort)
DEF_COPY_MASK_F(32s, int, 32s_C1MR, Ipp32s)
DEF_COPY_MASK_F(16uC3, Vec3s, 16u_C3MR, Ipp16u)
DEF_COPY_MASK(32sC2, Vec2i)
DEF_COPY_MASK_F(32sC4, Vec4i, 32s_C4MR, Ipp32s)
DEF_COPY_MASK(32sC6, Vec6i)
DEF_COPY_MASK(32sC8, Vec8i)
277
// Masked-copy function table indexed by element size in bytes (0..32).
// Zero entries have no specialized routine; getCopyMaskFunc() substitutes
// copyMaskGeneric for those.
BinaryFunc copyMaskTab[] =
{
    0,
    copyMask8u,     // 1 byte
    copyMask16u,    // 2 bytes
    copyMask8uC3,   // 3 bytes
    copyMask32s,    // 4 bytes
    0,
    copyMask16uC3,  // 6 bytes
    0,
    copyMask32sC2,  // 8 bytes
    0, 0, 0,
    copyMask32sC3,  // 12 bytes
    0, 0, 0,
    copyMask32sC4,  // 16 bytes
    0, 0, 0, 0, 0, 0, 0,
    copyMask32sC6,  // 24 bytes
    0, 0, 0, 0, 0, 0, 0,
    copyMask32sC8   // 32 bytes
};
298
getCopyMaskFunc(size_t esz)299 BinaryFunc getCopyMaskFunc(size_t esz)
300 {
301 return esz <= 32 && copyMaskTab[esz] ? copyMaskTab[esz] : copyMaskGeneric;
302 }
303
304 /* dst = src */
// Copy this matrix into _dst, choosing the appropriate path for GPU, UMat,
// 2-D and N-dimensional destinations. A fixed-type destination of a
// different type triggers a conversion instead of a raw copy.
void Mat::copyTo( OutputArray _dst ) const
{
    CV_INSTRUMENT_REGION();

#ifdef HAVE_CUDA
    // GpuMat destination: perform a host->device upload.
    if (_dst.isGpuMat())
    {
        _dst.getGpuMat().upload(*this);
        return;
    }
#endif

    int dtype = _dst.type();
    if( _dst.fixedType() && dtype != type() )
    {
        // Caller fixed the destination type: convert rather than copy.
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    if( empty() )
    {
        _dst.release();
        return;
    }

    if( _dst.isUMat() )
    {
        // Upload through the destination's device allocator.
        _dst.create( dims, size.p, type() );
        UMat dst = _dst.getUMat();
        CV_Assert(dst.u != NULL);
        size_t i, sz[CV_MAX_DIM] = {0}, dstofs[CV_MAX_DIM], esz = elemSize();
        CV_Assert(dims > 0 && dims < CV_MAX_DIM);
        for( i = 0; i < (size_t)dims; i++ )
            sz[i] = size.p[i];
        // The last dimension and its offset are expressed in bytes for the
        // allocator's upload API.
        sz[dims-1] *= esz;
        dst.ndoffset(dstofs);
        dstofs[dims-1] *= esz;
        dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p);
        return;
    }

    if( dims <= 2 )
    {
        _dst.create( rows, cols, type() );
        Mat dst = _dst.getMat();
        // Self-copy: nothing to do.
        if( data == dst.data )
            return;

        if( rows > 0 && cols > 0 )
        {
            Mat src = *this;
            // Collapse to the widest contiguous rows; sz.width is in bytes.
            Size sz = getContinuousSize2D(src, dst, (int)elemSize());
            CV_CheckGE(sz.width, 0, "");

            const uchar* sptr = src.data;
            uchar* dptr = dst.data;

#if IPP_VERSION_X100 >= 201700
            CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1R_L, sptr, (int)src.step, dptr, (int)dst.step, ippiSizeL(sz.width, sz.height)) >= 0)
#endif

            // Row-by-row byte copy.
            for (; sz.height--; sptr += src.step, dptr += dst.step)
                memcpy(dptr, sptr, sz.width);
        }
        return;
    }

    // N-dimensional case: copy plane by plane via the iterator.
    _dst.create( dims, size, type() );
    Mat dst = _dst.getMat();
    if( data == dst.data )
        return;

    if( total() != 0 )
    {
        const Mat* arrays[] = { this, &dst };
        uchar* ptrs[2] = {};
        NAryMatIterator it(arrays, ptrs, 2);
        size_t sz = it.size*elemSize();

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memcpy(ptrs[1], ptrs[0], sz);
    }
}
389
390 #ifdef HAVE_IPP
// IPP-accelerated masked copy. Returns false when IPP cannot handle the
// inputs, in which case the caller runs the generic implementation.
static bool ipp_copyTo(const Mat &src, Mat &dst, const Mat &mask)
{
#ifdef HAVE_IPP_IW_LL
    CV_INSTRUMENT_REGION_IPP();

    // The IPP routine supports only single-channel 8-bit masks.
    if(mask.channels() > 1 || mask.depth() != CV_8U)
        return false;

    if (src.dims <= 2)
    {
        IppiSize size = ippiSize(src.size());
        return CV_INSTRUMENT_FUN_IPP(llwiCopyMask, src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, size, (int)src.elemSize1(), src.channels(), mask.ptr(), (int)mask.step) >= 0;
    }
    else
    {
        // N-dimensional: iterate over contiguous planes, each processed as a
        // single row (step 0).
        const Mat *arrays[] = {&src, &dst, &mask, NULL};
        uchar *ptrs[3] = {NULL};
        NAryMatIterator it(arrays, ptrs);

        IppiSize size = ippiSize(it.size, 1);

        for (size_t i = 0; i < it.nplanes; i++, ++it)
        {
            if(CV_INSTRUMENT_FUN_IPP(llwiCopyMask, ptrs[0], 0, ptrs[1], 0, size, (int)src.elemSize1(), src.channels(), ptrs[2], 0) < 0)
                return false;
        }
        return true;
    }
#else
    CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(mask);
    return false;
#endif
}
424 #endif
425
// Masked copy: dst(i) = (*this)(i) wherever mask(i) != 0.
// An empty mask degenerates to a plain copyTo.
void Mat::copyTo( OutputArray _dst, InputArray _mask ) const
{
    CV_INSTRUMENT_REGION();

    Mat mask = _mask.getMat();
    if( !mask.data )
    {
        copyTo(_dst);
        return;
    }

    int cn = channels(), mcn = mask.channels();
    CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
    bool colorMask = mcn > 1;
    if( dims <= 2 )
    {
        CV_Assert( size() == mask.size() );
    }

    Mat dst;
    {
        Mat dst0 = _dst.getMat();
        _dst.create(dims, size, type()); // TODO Prohibit 'dst' re-creation, user should pass it explicitly with correct size/type or empty
        dst = _dst.getMat();

        if (dst.data != dst0.data) // re-allocation happened
        {
#ifdef OPENCV_FUTURE
            CV_Assert(dst0.empty() &&
                "copyTo(): dst size/type mismatch (looks like a bug) - use dst.release() before copyTo() call to suppress this message");
#endif
            dst = Scalar(0); // do not leave dst uninitialized
        }
    }

    CV_IPP_RUN_FAST(ipp_copyTo(*this, dst, mask))

    // Per-channel masks copy channel-by-channel (esz = one channel);
    // single-channel masks copy whole elements (esz = full element size).
    size_t esz = colorMask ? elemSize1() : elemSize();
    BinaryFunc copymask = getCopyMaskFunc(esz);

    if( dims <= 2 )
    {
        Mat src = *this;
        Size sz = getContinuousSize2D(src, dst, mask, mcn);
        copymask(src.data, src.step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
        return;
    }

    // N-dimensional case: process plane by plane.
    const Mat* arrays[] = { this, &dst, &mask, 0 };
    uchar* ptrs[3] = {};
    NAryMatIterator it(arrays, ptrs);
    Size sz((int)(it.size*mcn), 1);

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
}
482
483
can_apply_memset(const Mat & mat,const Scalar & s,int & fill_value)484 static bool can_apply_memset(const Mat &mat, const Scalar &s, int &fill_value)
485 {
486 // check if depth is 1 byte.
487 switch (mat.depth())
488 {
489 case CV_8U: fill_value = saturate_cast<uchar>( s.val[0] ); break;
490 case CV_8S: fill_value = saturate_cast<schar>( s.val[0] ); break;
491 default: return false;
492 }
493
494 // check if all element is same.
495 const int64* is = (const int64*)&s.val[0];
496 switch (mat.channels())
497 {
498 case 1: return true;
499 case 2: return (is[0] == is[1]);
500 case 3: return (is[0] == is[1] && is[1] == is[2]);
501 case 4: return (is[0] == is[1] && is[1] == is[2] && is[2] == is[3]);
502 default: return false;
503 }
504 }
505
// Fill the whole matrix with scalar 's', using memset fast paths when the
// raw byte pattern allows it.
Mat& Mat::operator = (const Scalar& s)
{
    CV_INSTRUMENT_REGION();

    if (this->empty())
        return *this;

    const Mat* arrays[] = { this };
    uchar* dptr;
    NAryMatIterator it(arrays, &dptr, 1);
    size_t elsize = it.size*elemSize();
    const int64* is = (const int64*)&s.val[0];

    if( is[0] == 0 && is[1] == 0 && is[2] == 0 && is[3] == 0 )
    {
        // Bitwise all-zero scalar: every plane can simply be zeroed.
        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memset( dptr, 0, elsize );
    }
    else
    {
        int fill_value = 0;
        // 8-bit depth with identical channel values: still memset-able.
        if ( can_apply_memset(*this, s, fill_value) )
        {
            for (size_t i = 0; i < it.nplanes; i++, ++it)
                memset(dptr, fill_value, elsize);
            return *this;
        }

        if( it.nplanes > 0 )
        {
            // Unroll the scalar into a small raw pattern and tile it over
            // the first plane.
            double scalar[12];
            scalarToRawData(s, scalar, type(), 12);
            size_t blockSize = 12*elemSize1();

            for( size_t j = 0; j < elsize; j += blockSize )
            {
                size_t sz = MIN(blockSize, elsize - j);
                CV_Assert(sz <= sizeof(scalar));
                memcpy( dptr + j, scalar, sz );
            }
        }

        // Remaining planes are byte-copies of the first one ('data' points
        // at the start of the matrix, i.e. the plane filled above).
        for( size_t i = 1; i < it.nplanes; i++ )
        {
            ++it;
            memcpy( dptr, data, elsize );
        }
    }
    return *this;
}
556
557 #ifdef HAVE_IPP
// IPP-accelerated masked setTo. Returns false when IPP cannot handle the
// inputs so the caller runs the generic path instead.
static bool ipp_Mat_setTo_Mat(Mat &dst, Mat &_val, Mat &mask)
{
#ifdef HAVE_IPP_IW_LL
    CV_INSTRUMENT_REGION_IPP();

    // Only the masked variant is accelerated here.
    if(mask.empty())
        return false;

    if(mask.depth() != CV_8U || mask.channels() > 1)
        return false;

    if(dst.channels() > 4)
        return false;

    if (dst.depth() == CV_32F)
    {
        // Reject NaN/Inf values so the generic path keeps OpenCV's semantics.
        for (int i = 0; i < (int)(_val.total()); i++)
        {
            float v = (float)(_val.at<double>(i)); // cast to float
            if (cvIsNaN(v) || cvIsInf(v)) // accept finite numbers only
                return false;
        }
    }

    if(dst.dims <= 2)
    {
        IppiSize size = ippiSize(dst.size());
        IppDataType dataType = ippiGetDataType(dst.depth());
        ::ipp::IwValueFloat s;
        convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);

        return CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, dst.ptr(), (int)dst.step, size, dataType, dst.channels(), mask.ptr(), (int)mask.step) >= 0;
    }
    else
    {
        // N-dimensional: iterate over contiguous planes, each processed as a
        // single row (step 0).
        const Mat *arrays[] = {&dst, mask.empty()?NULL:&mask, NULL};
        uchar *ptrs[2] = {NULL};
        NAryMatIterator it(arrays, ptrs);

        IppiSize size = {(int)it.size, 1};
        IppDataType dataType = ippiGetDataType(dst.depth());
        ::ipp::IwValueFloat s;
        convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it)
        {
            if(CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, ptrs[0], 0, size, dataType, dst.channels(), ptrs[1], 0) < 0)
                return false;
        }
        return true;
    }
#else
    CV_UNUSED(dst); CV_UNUSED(_val); CV_UNUSED(mask);
    return false;
#endif
}
614 #endif
615
// Set elements of this matrix to '_value'; when '_mask' is non-empty, only
// positions with a non-zero mask byte are written.
Mat& Mat::setTo(InputArray _value, InputArray _mask)
{
    CV_INSTRUMENT_REGION();

    if( empty() )
        return *this;

    Mat value = _value.getMat(), mask = _mask.getMat();

    CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT ));
    int cn = channels(), mcn = mask.channels();
    CV_Assert( mask.empty() || (mask.depth() == CV_8U && (mcn == 1 || mcn == cn) && size == mask.size) );

    CV_IPP_RUN_FAST(ipp_Mat_setTo_Mat(*this, value, mask), *this)

    // Per-channel masks write channel-by-channel (esz = one channel);
    // otherwise the copy unit is a whole element.
    size_t esz = mcn > 1 ? elemSize1() : elemSize();
    BinaryFunc copymask = getCopyMaskFunc(esz);

    const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 };
    uchar* ptrs[2]={0,0};
    NAryMatIterator it(arrays, ptrs);
    int totalsz = (int)it.size*mcn;
    // Process at most ~BLOCK_SIZE bytes at a time.
    int blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
    blockSize0 -= blockSize0 % mcn; // must be divisible without remainder for unrolling and advancing
    // One block's worth of the unrolled scalar, aligned for double access.
    AutoBuffer<uchar> _scbuf(blockSize0*esz + 32);
    uchar* scbuf = alignPtr((uchar*)_scbuf.data(), (int)sizeof(double));
    convertAndUnrollScalar( value, type(), scbuf, blockSize0/mcn );

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < totalsz; j += blockSize0 )
        {
            Size sz(std::min(blockSize0, totalsz - j), 1);
            size_t blockSize = sz.width*esz;
            if( ptrs[1] )
            {
                // Masked write: copy the prepared scalar block through the mask.
                copymask(scbuf, 0, ptrs[1], 0, ptrs[0], 0, sz, &esz);
                ptrs[1] += sz.width;
            }
            else
                memcpy(ptrs[0], scbuf, blockSize);
            ptrs[0] += blockSize;
        }
    }
    return *this;
}
662
663
664 #if defined HAVE_OPENCL && !defined __APPLE__
665
// OpenCL implementation of cv::repeat. Returns false when the kernel cannot
// be built so the caller falls back to the CPU path.
static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
    // 1x1 tiling degenerates to a plain copy.
    if (ny == 1 && nx == 1)
    {
        _src.copyTo(_dst);
        return true;
    }

    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
        rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1,
        kercn = ocl::predictOptimalVectorWidth(_src, _dst);

    // Tiling factors and vector width are baked into the kernel build options.
    ocl::Kernel k("repeat", ocl::core::repeat_oclsrc,
                  format("-D T=%s -D nx=%d -D ny=%d -D rowsPerWI=%d -D cn=%d",
                         ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
                         nx, ny, rowsPerWI, kercn));
    if (k.empty())
        return false;

    UMat src = _src.getUMat(), dst = _dst.getUMat();
    k.args(ocl::KernelArg::ReadOnly(src, cn, kercn), ocl::KernelArg::WriteOnlyNoSize(dst));

    // One work item covers kercn channels horizontally and rowsPerWI rows.
    size_t globalsize[] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}
691
692 #endif
693
repeat(InputArray _src,int ny,int nx,OutputArray _dst)694 void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
695 {
696 CV_INSTRUMENT_REGION();
697
698 CV_Assert(_src.getObj() != _dst.getObj());
699 CV_Assert( _src.dims() <= 2 );
700 CV_Assert( ny > 0 && nx > 0 );
701
702 Size ssize = _src.size();
703 _dst.create(ssize.height*ny, ssize.width*nx, _src.type());
704
705 #if !defined __APPLE__
706 CV_OCL_RUN(_dst.isUMat(),
707 ocl_repeat(_src, ny, nx, _dst))
708 #endif
709
710 Mat src = _src.getMat(), dst = _dst.getMat();
711 Size dsize = dst.size();
712 int esz = (int)src.elemSize();
713 int x, y;
714 ssize.width *= esz; dsize.width *= esz;
715
716 for( y = 0; y < ssize.height; y++ )
717 {
718 for( x = 0; x < dsize.width; x += ssize.width )
719 memcpy( dst.ptr(y) + x, src.ptr(y), ssize.width );
720 }
721
722 for( ; y < dsize.height; y++ )
723 memcpy( dst.ptr(y), dst.ptr(y - ssize.height), dsize.width );
724 }
725
// Convenience overload returning the tiled matrix by value.
Mat repeat(const Mat& src, int ny, int nx)
{
    // 1x1 tiling is the identity: return a header sharing the original data.
    if( ny == 1 && nx == 1 )
        return src;

    Mat result;
    repeat(src, ny, nx, result);
    return result;
}
734
735
736 } // cv
737
738
739 /*
740 Various border types, image boundaries are denoted with '|'
741
742 * BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
743 * BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
744 * BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
745 * BORDER_WRAP: cdefgh|abcdefgh|abcdefg
746 * BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
747 */
borderInterpolate(int p,int len,int borderType)748 int cv::borderInterpolate( int p, int len, int borderType )
749 {
750 CV_TRACE_FUNCTION_VERBOSE();
751
752 CV_DbgAssert(len > 0);
753
754 #ifdef CV_STATIC_ANALYSIS
755 if(p >= 0 && p < len)
756 #else
757 if( (unsigned)p < (unsigned)len )
758 #endif
759 ;
760 else if( borderType == BORDER_REPLICATE )
761 p = p < 0 ? 0 : len - 1;
762 else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
763 {
764 int delta = borderType == BORDER_REFLECT_101;
765 if( len == 1 )
766 return 0;
767 do
768 {
769 if( p < 0 )
770 p = -p - 1 + delta;
771 else
772 p = len - 1 - (p - len) - delta;
773 }
774 #ifdef CV_STATIC_ANALYSIS
775 while(p < 0 || p >= len);
776 #else
777 while( (unsigned)p >= (unsigned)len );
778 #endif
779 }
780 else if( borderType == BORDER_WRAP )
781 {
782 CV_Assert(len > 0);
783 if( p < 0 )
784 p -= ((p-len+1)/len)*len;
785 if( p >= len )
786 p %= len;
787 }
788 else if( borderType == BORDER_CONSTANT )
789 p = -1;
790 else
791 CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
792 return p;
793 }
794
795 namespace
796 {
797
// Generic (non-constant) border filling working on raw bytes; here 'cn' is
// the pixel size in bytes (the caller passes src.elemSize()).
void copyMakeBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
                        uchar* dst, size_t dststep, cv::Size dstroi,
                        int top, int left, int cn, int borderType )
{
    const int isz = (int)sizeof(int);
    int i, j, k, elemSize = 1;
    bool intMode = false;

    // When the pixel size, both steps and both base pointers are int-aligned,
    // copy the side borders in int-sized chunks instead of single bytes.
    if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 )
    {
        cn /= isz;
        elemSize = isz;
        intMode = true;
    }

    // Precompute, for every left/right border column, the source column
    // (per chunk) it mirrors under the chosen border mode.
    cv::AutoBuffer<int> _tab((dstroi.width - srcroi.width)*cn);
    int* tab = _tab.data();
    int right = dstroi.width - srcroi.width - left;
    int bottom = dstroi.height - srcroi.height - top;

    for( i = 0; i < left; i++ )
    {
        j = cv::borderInterpolate(i - left, srcroi.width, borderType)*cn;
        for( k = 0; k < cn; k++ )
            tab[i*cn + k] = j + k;
    }

    for( i = 0; i < right; i++ )
    {
        j = cv::borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn;
        for( k = 0; k < cn; k++ )
            tab[(i+left)*cn + k] = j + k;
    }

    // From here widths/offsets are measured in chunks (bytes or ints).
    srcroi.width *= cn;
    dstroi.width *= cn;
    left *= cn;
    right *= cn;

    uchar* dstInner = dst + dststep*top + left*elemSize;

    // Fill the interior rows and their left/right borders.
    for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
    {
        if( dstInner != src )
            memcpy(dstInner, src, srcroi.width*elemSize);

        if( intMode )
        {
            const int* isrc = (int*)src;
            int* idstInner = (int*)dstInner;
            for( j = 0; j < left; j++ )
                idstInner[j - left] = isrc[tab[j]];
            for( j = 0; j < right; j++ )
                idstInner[j + srcroi.width] = isrc[tab[j + left]];
        }
        else
        {
            for( j = 0; j < left; j++ )
                dstInner[j - left] = src[tab[j]];
            for( j = 0; j < right; j++ )
                dstInner[j + srcroi.width] = src[tab[j + left]];
        }
    }

    // Top and bottom borders are whole-row copies of already-filled rows.
    dstroi.width *= elemSize;
    dst += dststep*top;

    for( i = 0; i < top; i++ )
    {
        j = cv::borderInterpolate(i - top, srcroi.height, borderType);
        memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width);
    }

    for( i = 0; i < bottom; i++ )
    {
        j = cv::borderInterpolate(i + srcroi.height, srcroi.height, borderType);
        memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width);
    }
}
877
878
// Constant-value border filling on raw bytes; 'value' points to one border
// pixel of 'cn' bytes (the caller passes src.elemSize() as cn).
void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
                             uchar* dst, size_t dststep, cv::Size dstroi,
                             int top, int left, int cn, const uchar* value )
{
    int i, j;
    // Build one full destination row pre-filled with the border value; all
    // constant regions are memcpy'd from it.
    cv::AutoBuffer<uchar> _constBuf(dstroi.width*cn);
    uchar* constBuf = _constBuf.data();
    int right = dstroi.width - srcroi.width - left;
    int bottom = dstroi.height - srcroi.height - top;

    for( i = 0; i < dstroi.width; i++ )
    {
        for( j = 0; j < cn; j++ )
            constBuf[i*cn + j] = value[j];
    }

    // From here widths/offsets are measured in bytes.
    srcroi.width *= cn;
    dstroi.width *= cn;
    left *= cn;
    right *= cn;

    uchar* dstInner = dst + dststep*top + left;

    // Interior rows: copy the source row, then pad both sides.
    for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
    {
        if( dstInner != src )
            memcpy( dstInner, src, srcroi.width );
        memcpy( dstInner - left, constBuf, left );
        memcpy( dstInner + srcroi.width, constBuf, right );
    }

    // Top and bottom bands are whole constant rows.
    for( i = 0; i < top; i++ )
        memcpy(dst + i * dststep, constBuf, dstroi.width);

    dst += (top + srcroi.height) * dststep;
    for( i = 0; i < bottom; i++ )
        memcpy(dst + i * dststep, constBuf, dstroi.width);
}
917
918 }
919
920 #ifdef HAVE_OPENCL
921
922 namespace cv {
923
// OpenCL implementation of copyMakeBorder. Returns false for unsupported
// border types / channel counts so the caller falls back to the CPU path.
static bool ocl_copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
                                int left, int right, int borderType, const Scalar& value )
{
    int type = _src.type(), cn = CV_MAT_CN(type), depth = CV_MAT_DEPTH(type),
        rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
    bool isolated = (borderType & BORDER_ISOLATED) != 0;
    borderType &= ~cv::BORDER_ISOLATED;

    if ( !(borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE || borderType == BORDER_REFLECT ||
           borderType == BORDER_WRAP || borderType == BORDER_REFLECT_101) ||
         cn > 4)
        return false;

    // Index must match the BORDER_* enum order used above.
    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101" };
    // 3-channel scalars are passed as 4-channel for alignment.
    int scalarcn = cn == 3 ? 4 : cn;
    int sctype = CV_MAKETYPE(depth, scalarcn);
    String buildOptions = format("-D T=%s -D %s -D T1=%s -D cn=%d -D ST=%s -D rowsPerWI=%d",
                                 ocl::memopTypeToStr(type), borderMap[borderType],
                                 ocl::memopTypeToStr(depth), cn,
                                 ocl::memopTypeToStr(sctype), rowsPerWI);

    ocl::Kernel k("copyMakeBorder", ocl::core::copymakeborder_oclsrc, buildOptions);
    if (k.empty())
        return false;

    UMat src = _src.getUMat();
    // Non-isolated ROI: grow the ROI into the parent image as far as possible
    // and only synthesize the remaining border.
    if( src.isSubmatrix() && !isolated )
    {
        Size wholeSize;
        Point ofs;
        src.locateROI(wholeSize, ofs);
        int dtop = std::min(ofs.y, top);
        int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
        int dleft = std::min(ofs.x, left);
        int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
        src.adjustROI(dtop, dbottom, dleft, dright);
        top -= dtop;
        left -= dleft;
        bottom -= dbottom;
        right -= dright;
    }

    _dst.create(src.rows + top + bottom, src.cols + left + right, type);
    UMat dst = _dst.getUMat();

    // Nothing left to pad: a plain copy suffices.
    if (top == 0 && left == 0 && bottom == 0 && right == 0)
    {
        if(src.u != dst.u || src.step != dst.step)
            src.copyTo(dst);
        return true;
    }

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
           top, left, ocl::KernelArg::Constant(Mat(1, 1, sctype, value)));

    size_t globalsize[2] = { (size_t)dst.cols, ((size_t)dst.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}
982
983 }
984 #endif
985
986 #ifdef HAVE_IPP
987 namespace cv {
988
// IPP-accelerated copyMakeBorder. Returns false for border types or inputs
// that IPP cannot handle so the caller runs the generic implementation.
static bool ipp_copyMakeBorder( Mat &_src, Mat &_dst, int top, int bottom,
                                int left, int right, int _borderType, const Scalar& value )
{
#if defined HAVE_IPP_IW_LL && !IPP_DISABLE_PERF_COPYMAKE
    CV_INSTRUMENT_REGION_IPP();

    ::ipp::IwiBorderSize borderSize(left, top, right, bottom);
    ::ipp::IwiSize size(_src.cols, _src.rows);
    IppDataType dataType = ippiGetDataType(_src.depth());
    IppiBorderType borderType = ippiGetBorderType(_borderType);
    // -1 means the OpenCV border type has no IPP counterpart.
    if((int)borderType == -1)
        return false;

    if(_src.dims > 2)
        return false;

    // View of the destination interior (inside the border) that the source
    // maps onto.
    Rect dstRect(borderSize.left, borderSize.top,
                 _dst.cols - borderSize.right - borderSize.left,
                 _dst.rows - borderSize.bottom - borderSize.top);
    Mat subDst = Mat(_dst, dstRect);
    Mat *pSrc = &_src;

    return CV_INSTRUMENT_FUN_IPP(llwiCopyMakeBorder, pSrc->ptr(), pSrc->step, subDst.ptr(), subDst.step, size, dataType, _src.channels(), borderSize, borderType, &value[0]) >= 0;
#else
    CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(top); CV_UNUSED(bottom); CV_UNUSED(left); CV_UNUSED(right);
    CV_UNUSED(_borderType); CV_UNUSED(value);
    return false;
#endif
}
1018 }
1019 #endif
1020
// Pad the 2-D source with top/bottom/left/right borders of the requested
// type into _dst, dispatching to OpenCL/IPP fast paths when available.
void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
                         int left, int right, int borderType, const Scalar& value )
{
    CV_INSTRUMENT_REGION();

    CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 && _src.dims() <= 2);

    CV_OCL_RUN(_dst.isUMat(),
               ocl_copyMakeBorder(_src, _dst, top, bottom, left, right, borderType, value))

    Mat src = _src.getMat();
    int type = src.type();

    // Non-isolated ROI: grow the ROI into the parent image as far as
    // possible, then only synthesize the remaining border.
    if( src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0 )
    {
        Size wholeSize;
        Point ofs;
        src.locateROI(wholeSize, ofs);
        int dtop = std::min(ofs.y, top);
        int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
        int dleft = std::min(ofs.x, left);
        int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
        src.adjustROI(dtop, dbottom, dleft, dright);
        top -= dtop;
        left -= dleft;
        bottom -= dbottom;
        right -= dright;
    }

    _dst.create( src.rows + top + bottom, src.cols + left + right, type );
    Mat dst = _dst.getMat();

    // Nothing left to pad: a plain copy suffices.
    if(top == 0 && left == 0 && bottom == 0 && right == 0)
    {
        if(src.data != dst.data || src.step != dst.step)
            src.copyTo(dst);
        return;
    }

    borderType &= ~BORDER_ISOLATED;

    CV_IPP_RUN_FAST(ipp_copyMakeBorder(src, dst, top, bottom, left, right, borderType, value))

    if( borderType != BORDER_CONSTANT )
        copyMakeBorder_8u( src.ptr(), src.step, src.size(),
                           dst.ptr(), dst.step, dst.size(),
                           top, left, (int)src.elemSize(), borderType );
    else
    {
        // Convert the scalar once into a raw pixel pattern. For cn > 4 all
        // components must be equal so a single-channel pattern can be reused.
        int cn = src.channels(), cn1 = cn;
        AutoBuffer<double> buf(cn);
        if( cn > 4 )
        {
            CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] );
            cn1 = 1;
        }
        scalarToRawData(value, buf.data(), CV_MAKETYPE(src.depth(), cn1), cn);
        copyMakeConstBorder_8u( src.ptr(), src.step, src.size(),
                                dst.ptr(), dst.step, dst.size(),
                                top, left, (int)src.elemSize(), (uchar*)buf.data() );
    }
}
1083
1084
1085 #ifndef OPENCV_EXCLUDE_C_API
1086
/* dst = src: legacy C API copy, optionally masked.
   Three cases: sparse->sparse deep copy, COI-aware single-channel copy via
   mixChannels for IplImage arguments, and a plain (optionally masked)
   Mat::copyTo for everything else. */
CV_IMPL void
cvCopy( const void* srcarr, void* dstarr, const void* maskarr )
{
    if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr))
    {
        // Masks are not supported for sparse matrices.
        CV_Assert( maskarr == 0 );
        CvSparseMat* src1 = (CvSparseMat*)srcarr;
        CvSparseMat* dst1 = (CvSparseMat*)dstarr;
        CvSparseMatIterator iterator;
        CvSparseNode* node;

        // Copy the header fields and drop all existing destination nodes.
        dst1->dims = src1->dims;
        memcpy( dst1->size, src1->size, src1->dims*sizeof(src1->size[0]));
        dst1->valoffset = src1->valoffset;
        dst1->idxoffset = src1->idxoffset;
        cvClearSet( dst1->heap );

        // Adopt the source's (larger) hash table size when the destination's
        // table would be overloaded by the source's node count.
        if( src1->heap->active_count >= dst1->hashsize*CV_SPARSE_HASH_RATIO )
        {
            cvFree( &dst1->hashtable );
            dst1->hashsize = src1->hashsize;
            dst1->hashtable =
                (void**)cvAlloc( dst1->hashsize*sizeof(dst1->hashtable[0]));
        }

        memset( dst1->hashtable, 0, dst1->hashsize*sizeof(dst1->hashtable[0]));

        // Clone every node and re-bucket it by its cached hash value.
        // NOTE: 'hashval & (hashsize - 1)' acts as modulo, which assumes
        // hashsize is a power of two (maintained by the sparse-mat code).
        for( node = cvInitSparseMatIterator( src1, &iterator );
             node != 0; node = cvGetNextSparseNode( &iterator ))
        {
            CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( dst1->heap );
            int tabidx = node->hashval & (dst1->hashsize - 1);
            memcpy( node_copy, node, dst1->heap->elem_size );
            node_copy->next = (CvSparseNode*)dst1->hashtable[tabidx];
            dst1->hashtable[tabidx] = node_copy;
        }
        return;
    }
    cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
    CV_Assert( src.depth() == dst.depth() && src.size == dst.size );

    // IplImage arguments may carry a channel-of-interest (COI) selection.
    int coi1 = 0, coi2 = 0;
    if( CV_IS_IMAGE(srcarr) )
        coi1 = cvGetImageCOI((const IplImage*)srcarr);
    if( CV_IS_IMAGE(dstarr) )
        coi2 = cvGetImageCOI((const IplImage*)dstarr);

    if( coi1 || coi2 )
    {
        // When one side has a COI, the other side must either have a COI of
        // its own or be single-channel; exactly one channel is copied.
        CV_Assert( (coi1 != 0 || src.channels() == 1) &&
            (coi2 != 0 || dst.channels() == 1) );

        // COIs are 1-based; 0 means "no COI" and maps to channel 0.
        int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
        cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
        return;
    }
    else
        CV_Assert( src.channels() == dst.channels() );

    if( !maskarr )
        src.copyTo(dst);
    else
        src.copyTo(dst, cv::cvarrToMat(maskarr));
}
1152
1153 CV_IMPL void
cvSet(void * arr,CvScalar value,const void * maskarr)1154 cvSet( void* arr, CvScalar value, const void* maskarr )
1155 {
1156 cv::Mat m = cv::cvarrToMat(arr);
1157 if( !maskarr )
1158 m = value;
1159 else
1160 m.setTo(cv::Scalar(value), cv::cvarrToMat(maskarr));
1161 }
1162
1163 CV_IMPL void
cvSetZero(CvArr * arr)1164 cvSetZero( CvArr* arr )
1165 {
1166 if( CV_IS_SPARSE_MAT(arr) )
1167 {
1168 CvSparseMat* mat1 = (CvSparseMat*)arr;
1169 cvClearSet( mat1->heap );
1170 if( mat1->hashtable )
1171 memset( mat1->hashtable, 0, mat1->hashsize*sizeof(mat1->hashtable[0]));
1172 return;
1173 }
1174 cv::Mat m = cv::cvarrToMat(arr);
1175 m = cv::Scalar(0);
1176 }
1177
1178 CV_IMPL void
cvFlip(const CvArr * srcarr,CvArr * dstarr,int flip_mode)1179 cvFlip( const CvArr* srcarr, CvArr* dstarr, int flip_mode )
1180 {
1181 cv::Mat src = cv::cvarrToMat(srcarr);
1182 cv::Mat dst;
1183
1184 if (!dstarr)
1185 dst = src;
1186 else
1187 dst = cv::cvarrToMat(dstarr);
1188
1189 CV_Assert( src.type() == dst.type() && src.size() == dst.size() );
1190 cv::flip( src, dst, flip_mode );
1191 }
1192
1193 CV_IMPL void
cvRepeat(const CvArr * srcarr,CvArr * dstarr)1194 cvRepeat( const CvArr* srcarr, CvArr* dstarr )
1195 {
1196 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
1197 CV_Assert( src.type() == dst.type() &&
1198 dst.rows % src.rows == 0 && dst.cols % src.cols == 0 );
1199 cv::repeat(src, dst.rows/src.rows, dst.cols/src.cols, dst);
1200 }
1201
1202 #endif // OPENCV_EXCLUDE_C_API
1203 /* End of file. */
1204