1 /*
2 * By downloading, copying, installing or using the software you agree to this license.
3 * If you do not agree to this license, do not download, install,
4 * copy or use the software.
5 *
6 *
7 * License Agreement
8 * For Open Source Computer Vision Library
9 * (3-clause BSD License)
10 *
11 * Copyright (C) 2015, NVIDIA Corporation, all rights reserved.
12 * Third party copyrights are property of their respective owners.
13 *
14 * Redistribution and use in source and binary forms, with or without modification,
15 * are permitted provided that the following conditions are met:
16 *
17 * * Redistributions of source code must retain the above copyright notice,
18 * this list of conditions and the following disclaimer.
19 *
20 * * Redistributions in binary form must reproduce the above copyright notice,
21 * this list of conditions and the following disclaimer in the documentation
22 * and/or other materials provided with the distribution.
23 *
24 * * Neither the names of the copyright holders nor the names of the contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * This software is provided by the copyright holders and contributors "as is" and
29 * any express or implied warranties, including, but not limited to, the implied
30 * warranties of merchantability and fitness for a particular purpose are disclaimed.
31 * In no event shall copyright holders or contributors be liable for any direct,
32 * indirect, incidental, special, exemplary, or consequential damages
33 * (including, but not limited to, procurement of substitute goods or services;
34 * loss of use, data, or profits; or business interruption) however caused
35 * and on any theory of liability, whether in contract, strict liability,
36 * or tort (including negligence or otherwise) arising in any way out of
37 * the use of this software, even if advised of the possibility of such damage.
38 */
39
40 #include "remap.hpp"
41
42 namespace CAROTENE_NS {
43
isWarpAffineNearestNeighborSupported(const Size2D & ssize)44 bool isWarpAffineNearestNeighborSupported(const Size2D &ssize)
45 {
46 #if SIZE_MAX > UINT32_MAX
47 return !(ssize.width > 0xffffFFFF || ssize.height > 0xffffFFFF) && // Restrict image size since internal index evaluation
48 // is performed with u32
49 isSupportedConfiguration();
50 #else
51 (void)ssize;
52 return isSupportedConfiguration();
53 #endif
54 }
55
isWarpAffineLinearSupported(const Size2D & ssize)56 bool isWarpAffineLinearSupported(const Size2D &ssize)
57 {
58 #if SIZE_MAX > UINT32_MAX
59 return !(ssize.width > 0xffffFFFF || ssize.height > 0xffffFFFF) && // Restrict image size since internal index evaluation
60 // is performed with u32
61 isSupportedConfiguration();
62 #else
63 (void)ssize;
64 return isSupportedConfiguration();
65 #endif
66 }
67
/**
 * Nearest-neighbor affine warp of a single-channel u8 image.
 *
 * For every destination pixel (x, y) the source coordinate is computed as
 *     src_x = m[0] * x + m[2] * y + m[4]
 *     src_y = m[1] * x + m[3] * y + m[5]
 * (i.e. m is a 2x3 affine matrix stored column by column), and the nearest
 * source pixel is copied to the destination.
 *
 * The destination is processed in BLOCK_SIZE x BLOCK_SIZE tiles: for each
 * tile a table of per-pixel source offsets is built (vectorized with NEON,
 * scalar tail loop for the remaining <4 columns), then the actual pixel
 * transfer is delegated to the remap* helpers.
 *
 * Only BORDER_MODE_REPLICATE and BORDER_MODE_CONSTANT are handled here;
 * for any other border mode the destination is left untouched.
 */
void warpAffineNearestNeighbor(const Size2D &ssize, const Size2D &dsize,
                               const u8 * srcBase, ptrdiff_t srcStride,
                               const f32 * m,
                               u8 * dstBase, ptrdiff_t dstStride,
                               BORDER_MODE borderMode, u8 borderValue)
{
    internal::assertSupportedConfiguration(isWarpAffineNearestNeighborSupported(ssize));
#ifdef CAROTENE_NEON
    using namespace internal;

    // Per-tile table of source offsets (one s32 per destination pixel);
    // over-allocated by 16 entries so the pointer can be 16-byte aligned.
    s32 _map[BLOCK_SIZE * BLOCK_SIZE + 16];
    s32 * map = alignPtr(_map, 16);

    int32x4_t v_width4 = vdupq_n_s32(ssize.width - 1), v_height4 = vdupq_n_s32(ssize.height - 1);
    int32x4_t v_step4 = vdupq_n_s32(srcStride);
    float32x4_t v_4 = vdupq_n_f32(4.0f); // x advances by 4 per vector iteration

    // Broadcast the six affine coefficients into NEON registers.
    float32x4_t v_m0 = vdupq_n_f32(m[0]);
    float32x4_t v_m1 = vdupq_n_f32(m[1]);
    float32x4_t v_m2 = vdupq_n_f32(m[2]);
    float32x4_t v_m3 = vdupq_n_f32(m[3]);
    float32x4_t v_m4 = vdupq_n_f32(m[4]);
    float32x4_t v_m5 = vdupq_n_f32(m[5]);

    if (borderMode == BORDER_MODE_REPLICATE)
    {
        int32x4_t v_zero4 = vdupq_n_s32(0);

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(&map[0], blockWidth * sizeof(s32), y);

                    size_t x = 0, y_ = y + i;
                    // Absolute destination x coordinates of the first 4 lanes.
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_);
                    // Row-constant part of the transform: m[2]*y + m[4] and m[3]*y + m[5].
                    float32x4_t v_yx = vmlaq_f32(v_m4, v_m2, v_y), v_yy = vmlaq_f32(v_m5, v_m3, v_y);

                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);

                        // Convert to integer and clamp into the source image
                        // (replicate border: out-of-range maps to the edge pixel).
                        int32x4_t v_src_x = vmaxq_s32(v_zero4, vminq_s32(v_width4, vcvtq_s32_f32(v_src_xf)));
                        int32x4_t v_src_y = vmaxq_s32(v_zero4, vminq_s32(v_height4, vcvtq_s32_f32(v_src_yf)));
                        // Linear offset into the source: src_y * srcStride + src_x.
                        int32x4_t v_src_index = vmlaq_s32(v_src_x, v_src_y, v_step4);
                        vst1q_s32(map_row + x, v_src_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail for the remaining (blockWidth % 4) columns.
                    f32 yx = m[2] * y_ + m[4], yy = m[3] * y_ + m[5];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 src_x_f = m[0] * x_ + yx;
                        f32 src_y_f = m[1] * x_ + yy;
                        s32 src_x = floorf(src_x_f), src_y = floorf(src_y_f);

                        src_x = std::max(0, std::min<s32>(ssize.width - 1, src_x));
                        src_y = std::max(0, std::min<s32>(ssize.height - 1, src_y));
                        map_row[x] = src_y * srcStride + src_x;
                    }
                }

                // make remap
                remapNearestNeighborReplicate(Size2D(blockWidth, blockHeight), srcBase, &map[0],
                                              getRowPtr(dstBase, dstStride, i) + j, dstStride);
            }
        }
    }
    else if (borderMode == BORDER_MODE_CONSTANT)
    {
        int32x4_t v_m1_4 = vdupq_n_s32(-1); // sentinel offset for out-of-bounds pixels
        float32x4_t v_zero4 = vdupq_n_f32(0.0f);

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(&map[0], blockWidth * sizeof(s32), y);

                    size_t x = 0, y_ = y + i;
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_);
                    float32x4_t v_yx = vmlaq_f32(v_m4, v_m2, v_y), v_yy = vmlaq_f32(v_m5, v_m3, v_y);

                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);

                        int32x4_t v_src_x = vcvtq_s32_f32(v_src_xf);
                        int32x4_t v_src_y = vcvtq_s32_f32(v_src_yf);
                        // In-bounds test: float coordinate >= 0 and truncated
                        // integer coordinate <= last valid index.
                        uint32x4_t v_mask = vandq_u32(vandq_u32(vcgeq_f32(v_src_xf, v_zero4), vcleq_s32(v_src_x, v_width4)),
                                                      vandq_u32(vcgeq_f32(v_src_yf, v_zero4), vcleq_s32(v_src_y, v_height4)));
                        // In-bounds lanes get the source offset, others get -1.
                        int32x4_t v_src_index = vbslq_s32(v_mask, vmlaq_s32(v_src_x, v_src_y, v_step4), v_m1_4);
                        vst1q_s32(map_row + x, v_src_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail for the remaining (blockWidth % 4) columns.
                    f32 yx = m[2] * y_ + m[4], yy = m[3] * y_ + m[5];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 src_x_f = m[0] * x_ + yx;
                        f32 src_y_f = m[1] * x_ + yy;
                        s32 src_x = floorf(src_x_f), src_y = floorf(src_y_f);

                        // -1 marks pixels that fall outside the source image;
                        // the remap helper fills those with borderValue.
                        map_row[x] = (src_x >= 0) && (src_x < (s32)ssize.width) &&
                                     (src_y >= 0) && (src_y < (s32)ssize.height) ? src_y * srcStride + src_x : -1;
                    }
                }

                // make remap
                remapNearestNeighborConst(Size2D(blockWidth, blockHeight), srcBase, &map[0],
                                          getRowPtr(dstBase, dstStride, i) + j, dstStride, borderValue);
            }
        }
    }
#else
    (void)ssize;
    (void)dsize;
    (void)srcBase;
    (void)srcStride;
    (void)m;
    (void)dstBase;
    (void)dstStride;
    (void)borderMode;
    (void)borderValue;
#endif
}
212
/**
 * Bilinear affine warp of a single-channel u8 image.
 *
 * For every destination pixel (x, y) the source coordinate is computed as
 *     src_x = m[0] * x + m[2] * y + m[4]
 *     src_y = m[1] * x + m[3] * y + m[5]
 * (m is a 2x3 affine matrix stored column by column), and the result is the
 * bilinear interpolation of the four surrounding source pixels.
 *
 * The destination is processed in BLOCK_SIZE x BLOCK_SIZE tiles. For each
 * tile two tables are built (vectorized with NEON, scalar tail loop for the
 * remaining <4 columns):
 *   - map:    4 source offsets per pixel (top-left, top-right, bottom-left,
 *             bottom-right neighbor);
 *   - coeffs: 2 interpolation weights per pixel (fractional x and y parts).
 * The actual interpolation is delegated to the remapLinear* helpers.
 *
 * Only BORDER_MODE_REPLICATE and BORDER_MODE_CONSTANT are handled here;
 * for any other border mode the destination is left untouched.
 */
void warpAffineLinear(const Size2D &ssize, const Size2D &dsize,
                      const u8 * srcBase, ptrdiff_t srcStride,
                      const f32 * m,
                      u8 * dstBase, ptrdiff_t dstStride,
                      BORDER_MODE borderMode, u8 borderValue)
{
    internal::assertSupportedConfiguration(isWarpAffineLinearSupported(ssize));
#ifdef CAROTENE_NEON
    using namespace internal;

    // Per-tile tables: 4 offsets and 2 weights per destination pixel;
    // both over-allocated by 16 entries so the pointers can be 16-byte aligned.
    s32 _map[((BLOCK_SIZE * BLOCK_SIZE) << 2) + 16];
    f32 _coeffs[((BLOCK_SIZE * BLOCK_SIZE) << 1) + 16];
    s32 * map = alignPtr(_map, 16);
    f32 * coeffs = alignPtr(_coeffs, 16);

    int32x4_t v_width4 = vdupq_n_s32(ssize.width - 1), v_height4 = vdupq_n_s32(ssize.height - 1);
    int32x4_t v_step4 = vdupq_n_s32(srcStride), v_1 = vdupq_n_s32(1);
    float32x4_t v_zero4f = vdupq_n_f32(0.0f), v_one4f = vdupq_n_f32(1.0f);

    // Broadcast the six affine coefficients into NEON registers.
    float32x4_t v_m0 = vdupq_n_f32(m[0]);
    float32x4_t v_m1 = vdupq_n_f32(m[1]);
    float32x4_t v_m2 = vdupq_n_f32(m[2]);
    float32x4_t v_m3 = vdupq_n_f32(m[3]);
    float32x4_t v_m4 = vdupq_n_f32(m[4]);
    float32x4_t v_m5 = vdupq_n_f32(m[5]);

    if (borderMode == BORDER_MODE_REPLICATE)
    {
        int32x4_t v_zero4 = vdupq_n_s32(0);

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(map, blockWidth * sizeof(s32) * 4, y);
                    f32 * coeff_row = getRowPtr(coeffs, blockWidth * sizeof(f32) * 2, y);

                    size_t x = 0, y_ = y + i;
                    // Absolute destination x coordinates of the first 4 lanes.
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_), v_4 = vdupq_n_f32(4.0f);
                    // Row-constant part of the transform: m[2]*y + m[4] and m[3]*y + m[5].
                    float32x4_t v_yx = vmlaq_f32(v_m4, v_m2, v_y), v_yy = vmlaq_f32(v_m5, v_m3, v_y);

                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);

                        // vcvtq truncates toward zero; the mask-based fixup
                        // below turns this into floor() for negative coords.
                        int32x4_t v_src_x = vcvtq_s32_f32(v_src_xf);
                        int32x4_t v_src_y = vcvtq_s32_f32(v_src_yf);

                        // Fractional parts (bilinear weights).
                        float32x4x2_t v_coeff;
                        v_coeff.val[0] = vsubq_f32(v_src_xf, vcvtq_f32_s32(v_src_x));
                        v_coeff.val[1] = vsubq_f32(v_src_yf, vcvtq_f32_s32(v_src_y));
                        // Where the coordinate was negative, the truncated
                        // fraction is negative: add 1 to the weight and
                        // subtract 1 from the integer part (floor fixup).
                        uint32x4_t v_maskx = vcltq_f32(v_coeff.val[0], v_zero4f);
                        uint32x4_t v_masky = vcltq_f32(v_coeff.val[1], v_zero4f);
                        v_coeff.val[0] = vbslq_f32(v_maskx, vaddq_f32(v_one4f, v_coeff.val[0]), v_coeff.val[0]);
                        v_coeff.val[1] = vbslq_f32(v_masky, vaddq_f32(v_one4f, v_coeff.val[1]), v_coeff.val[1]);
                        v_src_x = vbslq_s32(v_maskx, vsubq_s32(v_src_x, v_1), v_src_x);
                        v_src_y = vbslq_s32(v_masky, vsubq_s32(v_src_y, v_1), v_src_y);

                        // Clamp both neighbor coordinates into the image
                        // (replicate border).
                        int32x4_t v_dst0_x = vmaxq_s32(v_zero4, vminq_s32(v_width4, v_src_x));
                        int32x4_t v_dst0_y = vmaxq_s32(v_zero4, vminq_s32(v_height4, v_src_y));
                        int32x4_t v_dst1_x = vmaxq_s32(v_zero4, vminq_s32(v_width4, vaddq_s32(v_1, v_src_x)));
                        int32x4_t v_dst1_y = vmaxq_s32(v_zero4, vminq_s32(v_height4, vaddq_s32(v_1, v_src_y)));

                        // Four neighbor offsets: TL, TR, BL, BR.
                        int32x4x4_t v_dst_index;
                        v_dst_index.val[0] = vmlaq_s32(v_dst0_x, v_dst0_y, v_step4);
                        v_dst_index.val[1] = vmlaq_s32(v_dst1_x, v_dst0_y, v_step4);
                        v_dst_index.val[2] = vmlaq_s32(v_dst0_x, v_dst1_y, v_step4);
                        v_dst_index.val[3] = vmlaq_s32(v_dst1_x, v_dst1_y, v_step4);

                        vst2q_f32(coeff_row + (x << 1), v_coeff);
                        vst4q_s32(map_row + (x << 2), v_dst_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail for the remaining (blockWidth % 4) columns.
                    f32 yx = m[2] * y_ + m[4], yy = m[3] * y_ + m[5];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 src_x_f = m[0] * x_ + yx;
                        f32 src_y_f = m[1] * x_ + yy;

                        s32 src0_x = (s32)floorf(src_x_f);
                        s32 src0_y = (s32)floorf(src_y_f);

                        coeff_row[(x << 1) + 0] = src_x_f - src0_x;
                        coeff_row[(x << 1) + 1] = src_y_f - src0_y;

                        // Clamp src1 before src0 is overwritten by its own clamp.
                        s32 src1_y = std::max(0, std::min<s32>(ssize.height - 1, src0_y + 1));
                        src0_y = std::max(0, std::min<s32>(ssize.height - 1, src0_y));
                        s32 src1_x = std::max(0, std::min<s32>(ssize.width - 1, src0_x + 1));
                        src0_x = std::max(0, std::min<s32>(ssize.width - 1, src0_x));

                        map_row[(x << 2) + 0] = src0_y * srcStride + src0_x;
                        map_row[(x << 2) + 1] = src0_y * srcStride + src1_x;
                        map_row[(x << 2) + 2] = src1_y * srcStride + src0_x;
                        map_row[(x << 2) + 3] = src1_y * srcStride + src1_x;
                    }
                }

                remapLinearReplicate(Size2D(blockWidth, blockHeight),
                                     srcBase, &map[0], &coeffs[0],
                                     getRowPtr(dstBase, dstStride, i) + j, dstStride);
            }
        }
    }
    else if (borderMode == BORDER_MODE_CONSTANT)
    {
        float32x4_t v_zero4 = vdupq_n_f32(0.0f);
        int32x4_t v_m1_4 = vdupq_n_s32(-1); // sentinel offset for out-of-bounds neighbors

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(map, blockWidth * sizeof(s32) * 4, y);
                    f32 * coeff_row = getRowPtr(coeffs, blockWidth * sizeof(f32) * 2, y);

                    size_t x = 0, y_ = y + i;
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_), v_4 = vdupq_n_f32(4.0f);
                    float32x4_t v_yx = vmlaq_f32(v_m4, v_m2, v_y), v_yy = vmlaq_f32(v_m5, v_m3, v_y);

                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);

                        // vcvtq truncates toward zero; mask-based floor fixup below.
                        int32x4_t v_src_x0 = vcvtq_s32_f32(v_src_xf);
                        int32x4_t v_src_y0 = vcvtq_s32_f32(v_src_yf);

                        // Fractional parts (bilinear weights) with floor fixup
                        // for negative coordinates.
                        float32x4x2_t v_coeff;
                        v_coeff.val[0] = vsubq_f32(v_src_xf, vcvtq_f32_s32(v_src_x0));
                        v_coeff.val[1] = vsubq_f32(v_src_yf, vcvtq_f32_s32(v_src_y0));
                        uint32x4_t v_maskx = vcltq_f32(v_coeff.val[0], v_zero4f);
                        uint32x4_t v_masky = vcltq_f32(v_coeff.val[1], v_zero4f);
                        v_coeff.val[0] = vbslq_f32(v_maskx, vaddq_f32(v_one4f, v_coeff.val[0]), v_coeff.val[0]);
                        v_coeff.val[1] = vbslq_f32(v_masky, vaddq_f32(v_one4f, v_coeff.val[1]), v_coeff.val[1]);
                        v_src_x0 = vbslq_s32(v_maskx, vsubq_s32(v_src_x0, v_1), v_src_x0);
                        v_src_y0 = vbslq_s32(v_masky, vsubq_s32(v_src_y0, v_1), v_src_y0);

                        int32x4_t v_src_x1 = vaddq_s32(v_src_x0, v_1);
                        int32x4_t v_src_y1 = vaddq_s32(v_src_y0, v_1);

                        // Four neighbor offsets: TL, TR, BL, BR.
                        int32x4x4_t v_dst_index;
                        v_dst_index.val[0] = vmlaq_s32(v_src_x0, v_src_y0, v_step4);
                        v_dst_index.val[1] = vmlaq_s32(v_src_x1, v_src_y0, v_step4);
                        v_dst_index.val[2] = vmlaq_s32(v_src_x0, v_src_y1, v_step4);
                        v_dst_index.val[3] = vmlaq_s32(v_src_x1, v_src_y1, v_step4);

                        // Per-neighbor in-bounds masks; each neighbor is tested
                        // independently so a pixel on the edge still blends
                        // with the border constant.
                        uint32x4_t v_mask_x0 = vandq_u32(vcgeq_f32(v_src_xf, v_zero4), vcleq_s32(v_src_x0, v_width4));
                        uint32x4_t v_mask_x1 = vandq_u32(vcgeq_f32(vaddq_f32(v_src_xf, v_one4f), v_zero4), vcleq_s32(v_src_x1, v_width4));
                        uint32x4_t v_mask_y0 = vandq_u32(vcgeq_f32(v_src_yf, v_zero4), vcleq_s32(v_src_y0, v_height4));
                        uint32x4_t v_mask_y1 = vandq_u32(vcgeq_f32(vaddq_f32(v_src_yf, v_one4f), v_zero4), vcleq_s32(v_src_y1, v_height4));

                        // Out-of-bounds neighbors get -1; remapLinearConst
                        // substitutes borderValue for them.
                        v_dst_index.val[0] = vbslq_s32(vandq_u32(v_mask_x0, v_mask_y0), v_dst_index.val[0], v_m1_4);
                        v_dst_index.val[1] = vbslq_s32(vandq_u32(v_mask_x1, v_mask_y0), v_dst_index.val[1], v_m1_4);
                        v_dst_index.val[2] = vbslq_s32(vandq_u32(v_mask_x0, v_mask_y1), v_dst_index.val[2], v_m1_4);
                        v_dst_index.val[3] = vbslq_s32(vandq_u32(v_mask_x1, v_mask_y1), v_dst_index.val[3], v_m1_4);

                        vst2q_f32(coeff_row + (x << 1), v_coeff);
                        vst4q_s32(map_row + (x << 2), v_dst_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail for the remaining (blockWidth % 4) columns.
                    f32 yx = m[2] * y_ + m[4], yy = m[3] * y_ + m[5];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 src_x_f = m[0] * x_ + yx;
                        f32 src_y_f = m[1] * x_ + yy;

                        s32 src0_x = (s32)floorf(src_x_f), src1_x = src0_x + 1;
                        s32 src0_y = (s32)floorf(src_y_f), src1_y = src0_y + 1;

                        coeff_row[(x << 1) + 0] = src_x_f - src0_x;
                        coeff_row[(x << 1) + 1] = src_y_f - src0_y;

                        // -1 marks neighbors outside the source image.
                        map_row[(x << 2) + 0] = (src0_x >= 0) && (src0_x < (s32)ssize.width) &&
                                                (src0_y >= 0) && (src0_y < (s32)ssize.height) ? src0_y * srcStride + src0_x : -1;
                        map_row[(x << 2) + 1] = (src1_x >= 0) && (src1_x < (s32)ssize.width) &&
                                                (src0_y >= 0) && (src0_y < (s32)ssize.height) ? src0_y * srcStride + src1_x : -1;
                        map_row[(x << 2) + 2] = (src0_x >= 0) && (src0_x < (s32)ssize.width) &&
                                                (src1_y >= 0) && (src1_y < (s32)ssize.height) ? src1_y * srcStride + src0_x : -1;
                        map_row[(x << 2) + 3] = (src1_x >= 0) && (src1_x < (s32)ssize.width) &&
                                                (src1_y >= 0) && (src1_y < (s32)ssize.height) ? src1_y * srcStride + src1_x : -1;
                    }
                }

                remapLinearConst(Size2D(blockWidth, blockHeight),
                                 srcBase, &map[0], &coeffs[0],
                                 getRowPtr(dstBase, dstStride, i) + j, dstStride, borderValue);
            }
        }
    }
#else
    (void)ssize;
    (void)dsize;
    (void)srcBase;
    (void)srcStride;
    (void)m;
    (void)dstBase;
    (void)dstStride;
    (void)borderMode;
    (void)borderValue;
#endif
}
433
434 } // namespace CAROTENE_NS
435