/*
 * Copyright (C) 2005-2011 Francois Meyer (dulle at free.fr)
 * Copyright (C) 2012-2015 team free-astro (see more in AUTHORS file)
 * Reference site is https://free-astro.org/index.php/Siril
 *
 * Siril is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Siril is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Siril. If not, see <http://www.gnu.org/licenses/>.
 *
 * Useful links about OpenCV:
 * http://docs.opencv.org/modules/core/doc/intro.html
 * http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#resize
 */
#ifdef HAVE_CONFIG_H
#  include <config.h>
#endif
#include <assert.h>
#include <iostream>
#include <iomanip>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/core/version.hpp"
#if CV_MAJOR_VERSION == 2
#include "opencv/findHomography/calib3d.hpp"
#else
#if CV_MAJOR_VERSION == 4
#define CV_RANSAC FM_RANSAC
#endif
#include <opencv2/calib3d.hpp>
#endif

#include "core/siril.h"
#include "core/proto.h"
#include "registration/matching/misc.h"
#include "registration/matching/atpmatch.h"
#include "opencv.h"
#include "opencv/ecc/ecc.h"

#ifdef __cplusplus
extern "C" {
#endif
#include "algos/statistics.h"
#ifdef __cplusplus
}
#endif

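/* maximum reprojection error, in pixels, for findHomography()'s RANSAC to
 * count a matched star as an inlier; 3 is the OpenCV default */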
#define defaultRANSACReprojThreshold 3

using namespace cv;

/* TODO:
 * fix memory leak
 */

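/* The fits_to_bgrbgr_* helpers convert Siril's planar channel layout
 * (separate R, G and B planes in pdata/fpdata) into the interleaved BGR
 * layout expected by OpenCV. The returned buffer is owned by the caller. */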
static WORD *fits_to_bgrbgr_ushort(fits *image) {
	size_t ndata = image->rx * image->ry * 3;
	WORD *bgrbgr = (WORD *)malloc(ndata * sizeof(WORD));
	if (!bgrbgr) { PRINT_ALLOC_ERR; return NULL; }
	for (size_t i = 0, j = 0; i < ndata; i += 3, j++) {
		bgrbgr[i + 0] = image->pdata[BLAYER][j];
		bgrbgr[i + 1] = image->pdata[GLAYER][j];
		bgrbgr[i + 2] = image->pdata[RLAYER][j];
	}
	return bgrbgr;
}

static float *fits_to_bgrbgr_float(fits *image) {
	size_t ndata = image->rx * image->ry * 3;
	float *bgrbgr = (float *)malloc(ndata * sizeof(float));
	if (!bgrbgr) { PRINT_ALLOC_ERR; return NULL; }
	for (size_t i = 0, j = 0; i < ndata; i += 3, j++) {
		bgrbgr[i + 0] = image->fpdata[BLAYER][j];
		bgrbgr[i + 1] = image->fpdata[GLAYER][j];
		bgrbgr[i + 2] = image->fpdata[RLAYER][j];
	}
	return bgrbgr;
}

static BYTE *fits8_to_bgrbgr(fits *image) {
	size_t ndata = image->rx * image->ry * 3;
	BYTE *bgrbgr = (BYTE *)malloc(ndata * sizeof(BYTE));
	if (!bgrbgr) { PRINT_ALLOC_ERR; return NULL; }
	for (size_t i = 0, j = 0; i < ndata; i += 3, j++) {
		bgrbgr[i + 0] = (BYTE)image->pdata[BLAYER][j];
		bgrbgr[i + 1] = (BYTE)image->pdata[GLAYER][j];
		bgrbgr[i + 2] = (BYTE)image->pdata[RLAYER][j];
	}
	return bgrbgr;
}

/* this prepares the input and output images, but leaves the input in a non-usable state, beware! */
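/* For 3-channel images the original planar buffer is freed and replaced by a
 * temporary interleaved BGR copy (returned through *bgr); Mat_to_image() must
 * be called afterwards to rebuild a valid fits. */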
static int image_to_Mat(fits *image, Mat *in, Mat *out, void **bgr, int target_rx, int target_ry) {
	if (image->naxes[2] != 1 && image->naxes[2] != 3) {
		siril_log_message(_("Images with %ld channels are not supported\n"), image->naxes[2]);
		return -1;
	}
	if (image->type == DATA_USHORT) {
		if (image->naxes[2] == 1) {
			*in = Mat(image->ry, image->rx, CV_16UC1, image->data);
			WORD *newbuf = (WORD *)calloc(target_rx * target_ry, sizeof(WORD));
			if (!newbuf) {
				PRINT_ALLOC_ERR;
				return -1;
			}
			*out = Mat(target_ry, target_rx, CV_16UC1, newbuf);
		}
		else if (image->naxes[2] == 3) {
			WORD *bgr_u = fits_to_bgrbgr_ushort(image);
			if (!bgr_u) return -1;
			free(image->data);
			image->data = NULL;
			memset(image->pdata, 0, sizeof image->pdata);
			*in = Mat(image->ry, image->rx, CV_16UC3, bgr_u);
			*out = Mat(target_ry, target_rx, CV_16UC3, Scalar(0));
			*bgr = bgr_u;
		}
	}
	else if (image->type == DATA_FLOAT) {
		if (image->naxes[2] == 1) {
			*in = Mat(image->ry, image->rx, CV_32FC1, image->fdata);
			float *newbuf = (float *)calloc(target_rx * target_ry, sizeof(float));
			if (!newbuf) {
				PRINT_ALLOC_ERR;
				return -1;
			}
			*out = Mat(target_ry, target_rx, CV_32FC1, newbuf);
		}
		else if (image->naxes[2] == 3) {
			float *bgr_f = fits_to_bgrbgr_float(image);
			if (!bgr_f) return -1;
			free(image->fdata);
			image->fdata = NULL;
			memset(image->fpdata, 0, sizeof image->fpdata);
			*in = Mat(image->ry, image->rx, CV_32FC3, bgr_f);
			*out = Mat(target_ry, target_rx, CV_32FC3, Scalar(0.0f));
			*bgr = bgr_f;
		}
	}
	else return -1;
	return 0;
}

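/* Writes the processed Mat back into the fits: for color images the result is
 * split from interleaved BGR back into the planar R, G, B layout, for mono
 * images the output buffer is adopted directly as the new pixel data. Image
 * dimensions are updated and cached statistics are invalidated. */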
static int Mat_to_image(fits *image, Mat *in, Mat *out, void *bgr, int target_rx, int target_ry) {
	in->release();
	if (bgr) free(bgr);
	if (image->naxes[2] == 1) {
		// free the buffer that is replaced by the output data below
		if (image->type == DATA_USHORT)
			free(image->data);
		else free(image->fdata);
	}

	size_t ndata = target_rx * target_ry;
	if (image->type == DATA_USHORT) {
		if (image->naxes[2] == 3) {
			size_t data_size = ndata * sizeof(WORD);
			// normally image->data is NULL here, as done in image_to_Mat
			WORD *newdata = (WORD*) realloc(image->data, data_size * image->naxes[2]);
			if (!newdata) {
				PRINT_ALLOC_ERR;
				out->release();
				return 1;
			}
			image->data = newdata;
			image->pdata[RLAYER] = image->data;
			image->pdata[GLAYER] = image->data + ndata;
			image->pdata[BLAYER] = image->data + ndata * 2;

			Mat channel[3];
			channel[0] = Mat(target_ry, target_rx, CV_16UC1, image->pdata[BLAYER]);
			channel[1] = Mat(target_ry, target_rx, CV_16UC1, image->pdata[GLAYER]);
			channel[2] = Mat(target_ry, target_rx, CV_16UC1, image->pdata[RLAYER]);

			split(*out, channel);

			channel[0].release();
			channel[1].release();
			channel[2].release();
		} else {
			image->data = (WORD *)out->data;
			image->pdata[RLAYER] = image->data;
			image->pdata[GLAYER] = image->data;
			image->pdata[BLAYER] = image->data;
		}
		out->release();
	} else {
		if (image->naxes[2] == 3) {
			size_t data_size = ndata * sizeof(float);
			float *newdata = (float *) realloc(image->fdata, data_size * image->naxes[2]);
			if (!newdata) {
				PRINT_ALLOC_ERR;
				out->release();
				return 1;
			}
			image->fdata = newdata;
			image->fpdata[RLAYER] = image->fdata;
			image->fpdata[GLAYER] = image->fdata + ndata;
			image->fpdata[BLAYER] = image->fdata + ndata * 2;

			Mat channel[3];
			channel[0] = Mat(target_ry, target_rx, CV_32FC1, image->fpdata[BLAYER]);
			channel[1] = Mat(target_ry, target_rx, CV_32FC1, image->fpdata[GLAYER]);
			channel[2] = Mat(target_ry, target_rx, CV_32FC1, image->fpdata[RLAYER]);

			split(*out, channel);

			channel[0].release();
			channel[1].release();
			channel[2].release();
		} else {
			image->fdata = (float *)out->data;
			image->fpdata[RLAYER] = image->fdata;
			image->fpdata[GLAYER] = image->fdata;
			image->fpdata[BLAYER] = image->fdata;
		}
		out->release();
	}
	image->rx = target_rx;
	image->ry = target_ry;
	image->naxes[0] = image->rx;
	image->naxes[1] = image->ry;
	invalidate_stats_from_fit(image);
	return 0;
}

/* resizes the image to toX x toY and stores the result back in image */
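/* Example (illustrative only, 'fit' being any loaded fits): the interpolation
 * argument is forwarded to cv::resize, so an OpenCV flag such as INTER_LINEAR
 * can be used:
 *   cvResizeGaussian(&fit, fit.rx / 2, fit.ry / 2, INTER_LINEAR);
 */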
int cvResizeGaussian(fits *image, int toX, int toY, int interpolation) {
	Mat in, out;
	void *bgr = NULL;

	if (image_to_Mat(image, &in, &out, &bgr, toX, toY))
		return 1;

	// OpenCV function
	resize(in, out, out.size(), 0, 0, interpolation);

	return Mat_to_image(image, &in, &out, bgr, toX, toY);
}

int cvRotateImage(fits *image, point center, double angle, int interpolation, int cropped) {
	Mat in, out;
	void *bgr = NULL;
	int target_rx = image->rx, target_ry = image->ry;
	Rect frame;
	Point2f pt(center.x, center.y);

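	/* "fast" path: rotations by an odd multiple of 90 degrees (90, 270, -90, ...)
	 * are done losslessly with a transpose + flip, no interpolation needed */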
	gboolean is_fast = fmod(angle, 90.0) == 0.0 && fmod(angle, 180) != 0.0;
	if (interpolation == -1)
		assert(is_fast);

	if (is_fast && (interpolation == -1 || !cropped)) {
		target_rx = image->ry;
		target_ry = image->rx;
	}
	else if (!cropped) {
		frame = RotatedRect(pt, Size(image->rx, image->ry), angle).boundingRect();
		target_rx = frame.width;
		target_ry = frame.height;
		siril_debug_print("after rotation, new image size will be %d x %d\n", target_rx, target_ry);
	}

	if (image_to_Mat(image, &in, &out, &bgr, target_rx, target_ry))
		return 1;

	if (is_fast && (interpolation == -1 || !cropped)) {	// fast rotation
		transpose(in, out);
		/* third argument of flip(): how to flip the array; 0 flips around the
		 * x-axis, a positive value (for example, 1) flips around the y-axis and
		 * a negative value (for example, -1) flips around both axes.
		 */
		if (angle == 90.0)
			flip(out, out, 0);
		else // 270, -90
			flip(out, out, 1);
	} else {
		Mat r = getRotationMatrix2D(pt, angle, 1.0);
		if (cropped == 1) {
			warpAffine(in, out, r, in.size(), interpolation);
		} else {
			// adjust transformation matrix
			r.at<double>(0, 2) += frame.width / 2.0 - pt.x;
			r.at<double>(1, 2) += frame.height / 2.0 - pt.y;

			warpAffine(in, out, r, frame.size(), interpolation);
		}
	}

	return Mat_to_image(image, &in, &out, bgr, target_rx, target_ry);
}

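/* Estimates a partial affine transform (rotation, translation and uniform
 * scale) mapping curpoints onto refpoints and applies it to the image,
 * optionally upscaling the result by 2. */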
int cvAffineTransformation(fits *image, pointf *refpoints, pointf *curpoints, int nb_points, gboolean upscale2x, int interpolation) {
	// see https://docs.opencv.org/3.4/d4/d61/tutorial_warp_affine.html
	std::vector<Point2f> ref;
	std::vector<Point2f> cur;

	/* build the point vectors from the matched star lists; the Y coordinate is
	 * flipped because FITS images have their origin at the bottom left while
	 * OpenCV uses the top left. */
	for (int i = 0; i < nb_points; i++) {
		ref.push_back(Point2f(refpoints[i].x, image->ry - refpoints[i].y - 1));
		cur.push_back(Point2f(curpoints[i].x, image->ry - curpoints[i].y - 1));
	}

	Mat m = estimateAffinePartial2D(cur, ref);
	//std::cout << m << std::endl;

	/* test that m is not a zero matrix */
	if (countNonZero(m) < 1) {
		siril_log_color_message(_("Singular Matrix. Cannot compute Affine Transformation.\n"), "red");
		return -1;
	}

	Mat in, out;
	void *bgr = NULL;
	int target_rx = image->rx, target_ry = image->ry;
	if (upscale2x) {
		target_rx *= 2;
		target_ry *= 2;
		m *= 2;
	}

	if (image_to_Mat(image, &in, &out, &bgr, target_rx, target_ry))
		return 1;

	warpAffine(in, out, m, out.size(), interpolation, BORDER_TRANSPARENT);

	return Mat_to_image(image, &in, &out, bgr, target_rx, target_ry);
}

static void convert_H_to_MatH(Homography *from, Mat &to) {
	to.at<double>(0, 0) = from->h00;
	to.at<double>(0, 1) = from->h01;
	to.at<double>(0, 2) = from->h02;
	to.at<double>(1, 0) = from->h10;
	to.at<double>(1, 1) = from->h11;
	to.at<double>(1, 2) = from->h12;
	to.at<double>(2, 0) = from->h20;
	to.at<double>(2, 1) = from->h21;
	to.at<double>(2, 2) = from->h22;
}

static void convert_MatH_to_H(Mat from, Homography *to) {
	to->h00 = from.at<double>(0, 0);
	to->h01 = from.at<double>(0, 1);
	to->h02 = from.at<double>(0, 2);
	to->h10 = from.at<double>(1, 0);
	to->h11 = from.at<double>(1, 1);
	to->h12 = from.at<double>(1, 2);
	to->h20 = from.at<double>(2, 0);
	to->h21 = from.at<double>(2, 1);
	to->h22 = from.at<double>(2, 2);
}

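/* Computes, with RANSAC, the homography that maps the star list of an image
 * onto the reference star list. On success it fills Hom (including the inlier
 * count) and returns the RANSAC inlier mask of n bytes, to be freed by the
 * caller; it returns NULL if the homography could not be computed. */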
unsigned char *cvCalculH(s_star *star_array_img,
		struct s_star *star_array_ref, int n, Homography *Hom) {

	std::vector<Point2f> ref;
	std::vector<Point2f> img;
	Mat mask;
	unsigned char *ret = NULL;
	int i;

	/* build vectors with lists of stars. */
	for (i = 0; i < n; i++) {
		ref.push_back(Point2f(star_array_ref[i].x, star_array_ref[i].y));
		img.push_back(Point2f(star_array_img[i].x, star_array_img[i].y));
	}

	Mat H = findHomography(img, ref, CV_RANSAC, defaultRANSACReprojThreshold, mask);
	if (countNonZero(H) < 1) {
		return NULL;
	}
	Hom->Inliers = countNonZero(mask);
	if (n > 0) {
		ret = (unsigned char *) malloc(n * sizeof(unsigned char));
		for (i = 0; i < n; i++) {
			ret[i] = mask.at<uchar>(i);
		}
	}

	convert_MatH_to_H(H, Hom);

	mask.release();
	return ret;
}

// transform an image using the homography.
int cvTransformImage(fits *image, Homography Hom, gboolean upscale2x, int interpolation) {
	Mat in, out;
	void *bgr = NULL;
	int target_rx = image->rx, target_ry = image->ry;
	if (upscale2x) {
		target_rx *= 2;
		target_ry *= 2;
	}

	if (image_to_Mat(image, &in, &out, &bgr, target_rx, target_ry))
		return 1;

	Mat H = Mat(3, 3, CV_64FC1);
	convert_H_to_MatH(&Hom, H);

	/* conjugate H by the Y-flip matrix F (H' = F * H * F^-1) so that the
	 * homography, computed in FITS bottom-up coordinates, is applied in
	 * OpenCV's top-down coordinates */
	Mat F = Mat::eye(3, 3, CV_64FC1);
	F.at<double>(1,1) = -1.0;
	F.at<double>(1,2) = image->ry - 1.0;
	H = F * H * F.inv();

	if (upscale2x) {
		Mat S = Mat::eye(3, 3, CV_64FC1);
		S.at<double>(0,0) = 2.0;
		S.at<double>(1,1) = 2.0;
		H = S * H;
	}

	// OpenCV function
	warpPerspective(in, out, H, Size(target_rx, target_ry), interpolation, BORDER_TRANSPARENT);

	return Mat_to_image(image, &in, &out, bgr, target_rx, target_ry);
}

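/* Unsharp masking: the result is (1 + amount) * original - amount * blurred,
 * with the blur being a Gaussian of the given sigma; amount = 0 gives a plain
 * Gaussian blur. */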
int cvUnsharpFilter(fits* image, double sigma, double amount) {
	Mat in, out;
	void *bgr = NULL;
	int target_rx = image->rx, target_ry = image->ry;

	if (image_to_Mat(image, &in, &out, &bgr, target_rx, target_ry))
		return 1;

	/* 3rd argument: Gaussian kernel size. When width and height are zero
	 * they are computed from sigma.
	 */
	GaussianBlur(in, out, Size(), sigma);
	if (fabs(amount) > 0.0) {
		out = in * (1 + amount) + out * (-amount);
	}

	return Mat_to_image(image, &in, &out, bgr, target_rx, target_ry);
}

/* CLAHE works on single-channel images. If the image is RGB, it is first
 * converted to CIE Lab and the algorithm is applied to the L channel only.
 */
static int cvClahe_ushort(fits *image, double clip_limit, int size) {
	assert(image->data);
	assert(image->rx);
	assert(image->ry);

	// preparing data
	Mat in, out;

	Ptr<CLAHE> clahe = createCLAHE();
	clahe->setClipLimit(clip_limit);
	clahe->setTilesGridSize(Size(size, size));

	if (image->naxes[2] == 3) {
		Mat lab_image;
		std::vector<Mat> lab_planes(3);
		BYTE *bgrbgr8;
		WORD *bgrbgr;

		switch (image->bitpix) {
			case BYTE_IMG:
				bgrbgr8 = fits8_to_bgrbgr(image);
				in = Mat(image->ry, image->rx, CV_8UC3, bgrbgr8);
				out = Mat();
				// convert the RGB color image to Lab
				cvtColor(in, lab_image, COLOR_BGR2Lab);

				// Extract the L channel
				split(lab_image, lab_planes); // now we have the L image in lab_planes[0]

				// apply the CLAHE algorithm to the L channel
				clahe->apply(lab_planes[0], lab_planes[0]);

				// Merge the color planes back into an Lab image
				merge(lab_planes, lab_image);

				// convert back to RGB
				cvtColor(lab_image, out, COLOR_Lab2BGR);
				out.convertTo(out, CV_16UC3, 1.0);

				free(bgrbgr8);
				break;
			default:
			case USHORT_IMG:
				bgrbgr = fits_to_bgrbgr_ushort(image);
				in = Mat(image->ry, image->rx, CV_16UC3, bgrbgr);
				in.convertTo(in, CV_32F, 1.0 / USHRT_MAX_DOUBLE);
				out = Mat();

				// convert the RGB color image to Lab
				cvtColor(in, lab_image, COLOR_BGR2Lab);

				// Extract the L channel
				split(lab_image, lab_planes); // now we have the L image in lab_planes[0]

				// apply the CLAHE algorithm to the L channel (does not work with 32F images)
				lab_planes[0].convertTo(lab_planes[0], CV_16U, USHRT_MAX_DOUBLE / 100.0);
				clahe->apply(lab_planes[0], lab_planes[0]);
				lab_planes[0].convertTo(lab_planes[0], CV_32F, 100.0 / USHRT_MAX_DOUBLE);

				// Merge the color planes back into an Lab image
				merge(lab_planes, lab_image);

				// convert back to RGB
				cvtColor(lab_image, out, COLOR_Lab2BGR);
				out.convertTo(out, CV_16UC3, USHRT_MAX_DOUBLE);

				free(bgrbgr);
		}

	} else {
		in = Mat(image->ry, image->rx, CV_16UC1, image->data);
		out = Mat();
		switch (image->bitpix) {
			case BYTE_IMG:
				in.convertTo(in, CV_8U, 1.0);
				clahe->apply(in, out);
				out.convertTo(out, CV_16UC3, 1.0);
				// dynamic range is important with CLAHE, use 16 bits output
				break;
			default:
			case USHORT_IMG:
				clahe->apply(in, out);
		}
	}

	std::vector<Mat> channel(3);
	split(out, channel);

	size_t nbpixels = image->naxes[0] * image->naxes[1];
	size_t ndata = nbpixels * sizeof(WORD);
	if (image->naxes[2] == 3) {
		memcpy(image->data, channel[2].data, ndata);
		memcpy(image->data + nbpixels, channel[1].data, ndata);
		memcpy(image->data + nbpixels * 2, channel[0].data, ndata);
		image->pdata[RLAYER] = image->data;
		image->pdata[GLAYER] = image->data + nbpixels;
		image->pdata[BLAYER] = image->data + nbpixels * 2;
	} else {
		memcpy(image->data, channel[0].data, ndata);
		image->pdata[RLAYER] = image->data;
		image->pdata[GLAYER] = image->data;
		image->pdata[BLAYER] = image->data;
	}

	image->rx = out.cols;
	image->ry = out.rows;
	image->naxes[0] = image->rx;
	image->naxes[1] = image->ry;

	/* free data */
	in.release();
	out.release();
	channel[0].release();
	channel[1].release();
	channel[2].release();
	invalidate_stats_from_fit(image);

	return 0;
}

static int cvClahe_float(fits *image, double clip_limit, int size) {
	assert(image->fdata);
	assert(image->rx);
	assert(image->ry);

	// preparing data
	Mat in, out;

	Ptr<CLAHE> clahe = createCLAHE();
	clahe->setClipLimit(clip_limit);
	clahe->setTilesGridSize(Size(size, size));

	if (image->naxes[2] == 3) {
		Mat lab_image;
		std::vector<Mat> lab_planes(3);
		float *bgrbgr;

		bgrbgr = fits_to_bgrbgr_float(image);
		in = Mat(image->ry, image->rx, CV_32FC3, bgrbgr);
		out = Mat();

		// convert the RGB color image to Lab
		cvtColor(in, lab_image, COLOR_BGR2Lab);

		// Extract the L channel
		split(lab_image, lab_planes); // now we have the L image in lab_planes[0]

		// apply the CLAHE algorithm to the L channel (does not work with 32F images)
		lab_planes[0].convertTo(lab_planes[0], CV_16U, USHRT_MAX_DOUBLE / 100.0);
		clahe->apply(lab_planes[0], lab_planes[0]);
		lab_planes[0].convertTo(lab_planes[0], CV_32F, 100.0 / USHRT_MAX_DOUBLE);

		// Merge the color planes back into an Lab image
		merge(lab_planes, lab_image);

		// convert back to RGB
		cvtColor(lab_image, out, COLOR_Lab2BGR);
		out.convertTo(out, CV_32FC3, 1.0);

		free(bgrbgr);

	} else {
		in = Mat(image->ry, image->rx, CV_32FC1, image->fdata);
		out = Mat();

		in.convertTo(in, CV_16U, USHRT_MAX_DOUBLE);
		clahe->apply(in, out);
		out.convertTo(out, CV_32F, 1.0 / USHRT_MAX_DOUBLE);
	}

	std::vector<Mat> channel(3);
	split(out, channel);

	size_t nbpixels = image->naxes[0] * image->naxes[1];
	size_t ndata = nbpixels * sizeof(float);
	if (image->naxes[2] == 3) {
		memcpy(image->fdata, channel[2].data, ndata);
		memcpy(image->fdata + nbpixels, channel[1].data, ndata);
		memcpy(image->fdata + nbpixels * 2, channel[0].data, ndata);
		image->fpdata[RLAYER] = image->fdata;
		image->fpdata[GLAYER] = image->fdata + nbpixels;
		image->fpdata[BLAYER] = image->fdata + nbpixels * 2;
	} else {
		memcpy(image->fdata, channel[0].data, ndata);
		image->fpdata[RLAYER] = image->fdata;
		image->fpdata[GLAYER] = image->fdata;
		image->fpdata[BLAYER] = image->fdata;
	}

	image->rx = out.cols;
	image->ry = out.rows;
	image->naxes[0] = image->rx;
	image->naxes[1] = image->ry;

	/* free data */
	in.release();
	out.release();
	channel[0].release();
	channel[1].release();
	channel[2].release();
	invalidate_stats_from_fit(image);

	return 0;
}

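/* public entry point: applies CLAHE with the given clip limit and tile size,
 * dispatching on the image data type; returns -1 for unsupported types */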
int cvClahe(fits *image, double clip_limit, int size) {
	if (image->type == DATA_USHORT)
		return cvClahe_ushort(image, clip_limit, size);
	if (image->type == DATA_FLOAT)
		return cvClahe_float(image, clip_limit, size);
	return -1;
}