// calibfilter.cpp — legacy OpenCV camera-calibration filter implementation
// (recovered from a doxygen source listing; extraction artifacts noted inline)
1/*M///////////////////////////////////////////////////////////////////////////////////////
2//
3// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4//
5// By downloading, copying, installing or using the software you agree to this license.
6// If you do not agree to this license, do not download, install,
7// copy or use the software.
8//
9//
10// Intel License Agreement
11// For Open Source Computer Vision Library
12//
13// Copyright (C) 2000, Intel Corporation, all rights reserved.
14// Third party copyrights are property of their respective owners.
15//
16// Redistribution and use in source and binary forms, with or without modification,
17// are permitted provided that the following conditions are met:
18//
19// * Redistribution's of source code must retain the above copyright notice,
20// this list of conditions and the following disclaimer.
21//
22// * Redistribution's in binary form must reproduce the above copyright notice,
23// this list of conditions and the following disclaimer in the documentation
24// and/or other materials provided with the distribution.
25//
26// * The name of Intel Corporation may not be used to endorse or promote products
27// derived from this software without specific prior written permission.
28//
29// This software is provided by the copyright holders and contributors "as is" and
30// any express or implied warranties, including, but not limited to, the implied
31// warranties of merchantability and fitness for a particular purpose are disclaimed.
32// In no event shall the Intel Corporation or contributors be liable for any direct,
33// indirect, incidental, special, exemplary, or consequential damages
34// (including, but not limited to, procurement of substitute goods or services;
35// loss of use, data, or profits; or business interruption) however caused
36// and on any theory of liability, whether in contract, strict liability,
37// or tort (including negligence or otherwise) arising in any way out of
38// the use of this software, even if advised of the possibility of such damage.
39//
40//M*/
41
42#include "calibfilter.h"
43
44#include <stdio.h>
45
46#include <opencv2/calib3d/calib3d_c.h>
47#include <opencv2/core/core_c.h>
48
49#undef quad
50
// CvCalibFilter constructor: resets every member to its "empty" state.
// NOTE(review): extraction dropped the head line (likely
// "CvCalibFilter::CvCalibFilter()") and a few etalon-field initializers
// below -- confirm against the original source.
{
    /* etalon data */
    /* NOTE(review): initializer lines (likely etalonType /
       etalonParamCount) were lost in extraction here. */
    etalonParams = 0;
    /* NOTE(review): one line (likely "etalonPointCount = 0;") lost here. */
    etalonPoints = 0;

    /* camera data */
    cameraCount = 1; /* single-camera setup by default */

    /* Zero out all per-camera bookkeeping arrays and handles. */
    memset(points, 0, sizeof(points));
    memset(undistMap, 0, sizeof(undistMap));
    undistImg = 0;
    memset(latestCounts, 0, sizeof(latestCounts));
    memset(latestPoints, 0, sizeof(latestPoints));
    /* stereo is a non-trivial aggregate; zero-filling it with memset is
       deliberate here, so silence GCC's -Wclass-memaccess warning. */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
    memset(&stereo, 0, sizeof(stereo));
#pragma GCC diagnostic pop
    maxPoints = 0;
    framesTotal = 15; /* default number of frames to accumulate */
    /* NOTE(review): one line (likely "framesAccepted = 0;") lost here. */
    isCalibrated = false;

    imgSize = cvSize(0, 0);
    grayImg = 0;
    tempImg = 0;
    storage = 0;

    memset(rectMap, 0, sizeof(rectMap));
}
84
// CvCalibFilter destructor: releases every buffer and matrix the filter
// owns (etalon arrays, working images, memory storage).
// NOTE(review): extraction dropped the head line (likely
// "CvCalibFilter::~CvCalibFilter()") and one statement before the first
// cvFree (possibly "SetCameraCount(0);") -- confirm against the original.
{
    cvFree(&etalonParams);
    cvFree(&etalonPoints);
    cvReleaseMat(&grayImg);
    cvReleaseMat(&tempImg);
    cvReleaseMat(&undistImg);
    cvReleaseMemStorage(&storage);
}
95
/* Configures the calibration target (etalon).
 * For the checkerboard etalon, params = { squares_x, squares_y, square_size }
 * and the inner-corner grid is generated automatically; for the user-defined
 * etalon the caller supplies pointCount (>= 4) explicit points in _points.
 * Returns false (after asserting in debug builds) on invalid arguments.
 * Stops any calibration in progress.
 * NOTE(review): extraction dropped the head line (likely
 * "CvCalibFilter::SetEtalon(CvCalibEtalonType type,") and the "case"
 * labels of both switch statements below -- confirm against the original. */
bool
    double* params,
    int pointCount,
    CvPoint2D32f* _points)
{
    int i, arrSize;

    Stop();

    /* Drop per-camera latest-point buffers; they refer to the old etalon. */
    for (i = 0; i < MAX_CAMERAS; i++)
    {
        cvFree(latestPoints + i);
    }

    /* The parameter block is rebuilt whenever the type changes (and always
       for the user-defined etalon). */
    if (type == CV_CALIB_ETALON_USER || type != etalonType)
    {
        if (etalonParams != NULL)
        {
            cvFree(&etalonParams);
        }
    }

    etalonType = type;

    /* Validate arguments per etalon type. */
    switch (etalonType)
    {
        /* NOTE(review): a "case" label (likely
           "case CV_CALIB_ETALON_CHESSBOARD:" plus an etalonParamCount
           assignment) was lost in extraction here. */
        if (!params || cvRound(params[0]) != params[0] || params[0] < 3 ||
            cvRound(params[1]) != params[1] || params[1] < 3 || params[2] <= 0)
        {
            assert(0);
            return false;
        }

        /* Inner corners: (squares_x - 1) x (squares_y - 1). */
        pointCount = cvRound((params[0] - 1) * (params[1] - 1));
        break;

        /* NOTE(review): "case CV_CALIB_ETALON_USER:" label lost here. */
        if (!_points || pointCount < 4)
        {
            assert(0);
            return false;
        }
        break;

    default:
        assert(0);
        return false;
    }

    if (etalonParamCount > 0)
    {
        arrSize = etalonParamCount * sizeof(etalonParams[0]);
        etalonParams = (double*)cvAlloc(arrSize);
    }

    arrSize = pointCount * sizeof(etalonPoints[0]);

    /* (Re)allocate the point array only when the count changed. */
    if (etalonPointCount != pointCount)
    {
        if (etalonPoints != NULL)
        {
            cvFree(&etalonPoints);
        }
        etalonPointCount = pointCount;
        etalonPoints = (CvPoint2D32f*)cvAlloc(arrSize);
    }

    /* Fill the parameter and point arrays per etalon type. */
    switch (etalonType)
    {
        /* NOTE(review): "case CV_CALIB_ETALON_CHESSBOARD:" label lost here. */
    {
        int etalonWidth = cvRound(params[0]) - 1;
        int etalonHeight = cvRound(params[1]) - 1;
        int x, y, k = 0;

        etalonParams[0] = etalonWidth;
        etalonParams[1] = etalonHeight;
        etalonParams[2] = params[2];

        /* Corners are generated right-to-left within each row, scaled by
           the physical square size. */
        for (y = 0; y < etalonHeight; y++)
            for (x = 0; x < etalonWidth; x++)
            {
                etalonPoints[k++] =
                    cvPoint2D32f((etalonWidth - 1 - x) * params[2], y * params[2]);
            }
    }
    break;

        /* NOTE(review): "case CV_CALIB_ETALON_USER:" label lost here.
           Also note: arrSize here is pointCount * sizeof(CvPoint2D32f),
           while etalonParams was sized from etalonParamCount doubles --
           the memcpy into etalonParams looks suspicious if params is
           non-NULL for the user etalon; confirm against the original. */
        if (params != NULL)
        {
            memcpy(etalonParams, params, arrSize);
        }
        if (_points != NULL)
        {
            memcpy(etalonPoints, _points, arrSize);
        }
        break;

    default:
        assert(0);
        return false;
    }

    return true;
}
207
/* Read-only accessor for the current etalon configuration. Every output
 * pointer is optional (may be NULL); each non-NULL one receives the
 * corresponding field. The returned pointers alias internal storage and
 * must not be freed by the caller. Returns the etalon type.
 * NOTE(review): extraction dropped the head lines (likely
 * "CvCalibEtalonType CvCalibFilter::GetEtalon(int* paramCount,") --
 * confirm against the original source. */
    const double** params,
    int* pointCount,
    const CvPoint2D32f** _points) const
{
    if (paramCount)
    {
        *paramCount = etalonParamCount;
    }

    if (params)
    {
        *params = etalonParams;
    }

    if (pointCount)
    {
        *pointCount = etalonPointCount;
    }

    if (_points)
    {
        *_points = etalonPoints;
    }

    return etalonType;
}
236
/* Sets the number of cameras being calibrated. Stops any calibration in
 * progress and, when the count actually changes, frees all per-camera
 * point buffers and undistortion/rectification maps.
 * NOTE(review): extraction dropped the head line (likely
 * "CvCalibFilter::SetCameraCount(int count)") -- confirm. */
void
{
    Stop();

    if (count != cameraCount)
    {
        /* Release everything tied to the old camera count. */
        for (int i = 0; i < cameraCount; i++)
        {
            cvFree(points + i);
            cvFree(latestPoints + i);
            cvReleaseMat(&undistMap[i][0]);
            cvReleaseMat(&undistMap[i][1]);
            cvReleaseMat(&rectMap[i][0]);
            cvReleaseMat(&rectMap[i][1]);
        }

        memset(latestCounts, 0, sizeof(latestCounts));
        maxPoints = 0;
        cameraCount = count;
    }
}
259
/* Sets how many accepted frames are required before calibration runs.
 * A minimum of 5 frames is enforced; smaller values are rejected
 * (assert in debug builds, false returned).
 * NOTE(review): extraction dropped the head line (likely
 * "CvCalibFilter::SetFrames(int frames)") -- confirm. */
bool
{
    if (frames < 5)
    {
        assert(0);
        return false;
    }

    framesTotal = frames;
    return true;
}
272
273/* Calibrates camera using multiple views of calibration pattern */
274static void
275cvCalibrateCamera(int image_count,
276 int* _point_counts,
277 cv::Size image_size,
278 cv::Point2f* _image_points,
279 cv::Point2f* _object_points,
280 float* _distortion_coeffs,
281 float* _camera_matrix,
282 float* _translation_vectors,
283 float* _rotation_matrices,
284 int flags)
285{
286 int i, total = 0;
287 cv::Mat point_counts = cv::Mat(image_count, 1, CV_32SC1, _point_counts);
288 std::vector<std::vector<cv::Point2f>> image_points, object_points;
289 cv::Mat dist_coeffs = cv::Mat(4, 1, CV_32FC1, _distortion_coeffs);
290 cv::Mat camera_matrix = cv::Mat(3, 3, CV_32FC1, _camera_matrix);
291 std::vector<cv::Mat> rotation_matrices;
292 std::vector<cv::Mat> translation_vectors;
293
294 for (i = 0; i < image_count; i++)
295 {
296 total += _point_counts[i];
297 for (int j = 0; j < _point_counts[i]; j++)
298 {
299 std::vector<cv::Point2f> img, obj;
300 img.insert(img.begin(), _image_points, _image_points + _point_counts[i]);
301 obj.insert(obj.begin(), _object_points, _object_points + _point_counts[i]);
302 image_points.push_back(img);
303 object_points.push_back(obj);
304 _image_points += _point_counts[i];
305 _object_points += _point_counts[i];
306 }
307
308 rotation_matrices.emplace_back(1, 9, CV_32FC1, _rotation_matrices);
309 translation_vectors.emplace_back(1, 3, CV_32FC1, _translation_vectors);
310 _rotation_matrices += 9;
311 _translation_vectors += 3;
312 }
313
314
315 cv::calibrateCamera(object_points,
316 image_points,
317 std::move(image_size),
318 camera_matrix,
319 dist_coeffs,
320 rotation_matrices,
321 translation_vectors,
322 flags);
323}
324
325#define icvCheckVector_32f(ptr, len)
326#define icvCheckVector_64f(ptr, len)
327
328CV_INLINE void
329icvMulMatrix_32f(const float* src1, int w1, int h1, const float* src2, int w2, int h2, float* dst)
330{
331 int i, j, k;
332
333 if (w1 != h2)
334 {
335 assert(0);
336 return;
337 }
338
339 for (i = 0; i < h1; i++, src1 += w1, dst += w2)
340 for (j = 0; j < w2; j++)
341 {
342 double s = 0;
343 for (k = 0; k < w1; k++)
344 {
345 s += src1[k] * src2[j + k * w2];
346 }
347 dst[j] = (float)s;
348 }
349
350 icvCheckVector_32f(dst, h1 * w2);
351}
352
353CV_INLINE void
354icvSubVector_64d(const double* src1, const double* src2, double* dst, int len)
355{
356 int i;
357 for (i = 0; i < len; i++)
358 {
359 dst[i] = src1[i] - src2[i];
360 }
361
362 icvCheckVector_64f(dst, len);
363}
364
365CV_INLINE void
366icvMulMatrix_64d(const double* src1,
367 int w1,
368 int h1,
369 const double* src2,
370 int w2,
371 int h2,
372 double* dst)
373{
374 int i, j, k;
375
376 if (w1 != h2)
377 {
378 assert(0);
379 return;
380 }
381
382 for (i = 0; i < h1; i++, src1 += w1, dst += w2)
383 for (j = 0; j < w2; j++)
384 {
385 double s = 0;
386 for (k = 0; k < w1; k++)
387 {
388 s += src1[k] * src2[j + k * w2];
389 }
390 dst[j] = s;
391 }
392
393 icvCheckVector_64f(dst, h1 * w2);
394}
395
/* Inverts the n x n row-major double matrix A into invA using SVD
   (which yields the pseudo-inverse for singular inputs). */
CV_INLINE void
icvInvertMatrix_64d(double* A, int n, double* invA)
{
    CvMat Am = cvMat(n, n, CV_64F, A);
    CvMat invAm = cvMat(n, n, CV_64F, invA);

    cvInvert(&Am, &invAm, CV_SVD);
}
404
405CV_INLINE void
406icvAddVector_64d(const double* src1, const double* src2, double* dst, int len)
407{
408 int i;
409 for (i = 0; i < len; i++)
410 {
411 dst[i] = src1[i] + src2[i];
412 }
413
414 icvCheckVector_64f(dst, len);
415}
416
417CV_INLINE void
418icvTransposeMatrix_64d(const double* src, int w, int h, double* dst)
419{
420 int i, j;
421
422 for (i = 0; i < w; i++)
423 for (j = 0; j < h; j++)
424 {
425 *dst++ = src[j * w + i];
426 }
427
428 icvCheckVector_64f(dst, w * h);
429}
430
/* Computes the pseudo-inverse of src into dst via SVD and returns
   cvInvert's result (with CV_SVD this reflects src's conditioning;
   see the OpenCV cvInvert documentation for the exact meaning). */
double
cvPseudoInverse(const CvArr* src, CvArr* dst)
{
    return cvInvert(src, dst, CV_SVD);
}
436
437// BEGIN
438/*M///////////////////////////////////////////////////////////////////////////////////////
439//
440// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
441//
442// By downloading, copying, installing or using the software you agree to this license.
443// If you do not agree to this license, do not download, install,
444// copy or use the software.
445//
446//
447// Intel License Agreement
448// For Open Source Computer Vision Library
449//
450// Copyright (C) 2000, Intel Corporation, all rights reserved.
451// Third party copyrights are property of their respective owners.
452//
453// Redistribution and use in source and binary forms, with or without modification,
454// are permitted provided that the following conditions are met:
455//
456// * Redistribution's of source code must retain the above copyright notice,
457// this list of conditions and the following disclaimer.
458//
459// * Redistribution's in binary form must reproduce the above copyright notice,
460// this list of conditions and the following disclaimer in the documentation
461// and/or other materials provided with the distribution.
462//
463// * The name of Intel Corporation may not be used to endorse or promote products
464// derived from this software without specific prior written permission.
465//
466// This software is provided by the copyright holders and contributors "as is" and
467// any express or implied warranties, including, but not limited to, the implied
468// warranties of merchantability and fitness for a particular purpose are disclaimed.
469// In no event shall the Intel Corporation or contributors be liable for any direct,
470// indirect, incidental, special, exemplary, or consequential damages
471// (including, but not limited to, procurement of substitute goods or services;
472// loss of use, data, or profits; or business interruption) however caused
473// and on any theory of liability, whether in contract, strict liability,
474// or tort (including negligence or otherwise) arising in any way out of
475// the use of this software, even if advised of the possibility of such damage.
476//
477//M*/
478
479
480#include <float.h>
481#include <limits.h>
482
483#include <utility>
484
/* IPP-compatible return codes */
/* NOTE(review): the run of CV_* status #defines that belongs here
   (CV_NO_ERR, CV_OK, CV_BADARG_ERR, CV_BADFACTOR_ERR, ...) was lost in
   extraction; those names are used throughout the rest of this file. */

/* Valery Mosyagin */

#undef quad

/* Near-zero tolerance for the double-precision tests in this file. */
#define EPS64D 1e-9

/* Legacy OpenCV aliases: raw pointers to row-major matrix/vector storage
   plus 64-bit point-type names. */
using CvMatr32f = float*;
using CvPoint2D64d = CvPoint2D64f;
using CvPoint3D64d = CvPoint3D64f;
using CvVect64d = double*;
using CvMatr64d = double*;
using CvVect32f = float*;

/* Legacy matrix-multiply shorthand over cvMatMulAdd. */
#define cvmMul(src1, src2, dst) cvMatMulAdd(src1, src2, 0, dst)

/* Legacy type tags; every one collapses to CV_32FC1 or CV_64FC1. */
enum
{
    CV_MAT32F = CV_32FC1,
    CV_MAT3x1_32F = CV_32FC1,
    CV_MAT4x1_32F = CV_32FC1,
    CV_MAT3x3_32F = CV_32FC1,
    CV_MAT4x4_32F = CV_32FC1,

    CV_MAT64D = CV_64FC1,
    CV_MAT3x1_64D = CV_64FC1,
    CV_MAT4x1_64D = CV_64FC1,
    CV_MAT3x3_64D = CV_64FC1,
    CV_MAT4x4_64D = CV_64FC1
};

/* Forward declarations (implementations live elsewhere in the project). */
int cvComputeEssentialMatrix(CvMatr32f rotMatr, CvMatr32f transVect, CvMatr32f essMatr);

/* NOTE(review): the head line of this declaration (likely
   "int cvConvertEssential2Fundamental(CvMatr32f essMatr,") was lost in
   extraction. */
    CvMatr32f fundMatr,
    CvMatr32f cameraMatr1,
    CvMatr32f cameraMatr2);

int
cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr, CvPoint3D32f* epipole1, CvPoint3D32f* epipole2);

void icvTestPoint(CvPoint2D64d testPoint,
                  CvVect64d line1,
                  CvVect64d line2,
                  CvPoint2D64d basePoint,
                  int* result);


/* Euclidean distance between two 3D points, written to *dist. */
void icvGetPieceLength3D(CvPoint3D64d point1, CvPoint3D64d point2, double* dist);
572
/* Places on the ray (pointCorner -> point1) the point whose distance from
 * pointCorner equals |pointCorner - point2|, writing it to pointSym2
 * (i.e. point2's distance is transferred onto the point1 ray).
 * Returns CV_BADARG_ERR when point1 coincides with pointCorner (zero-length
 * base ray), CV_NO_ERR otherwise.
 * NOTE(review): extraction dropped the head line (likely
 * "icvGetSymPoint3D(CvPoint3D64d pointCorner,") -- confirm. */
int
    CvPoint3D64d point1,
    CvPoint3D64d point2,
    CvPoint3D64d* pointSym2)
{
    double len1, len2;
    double alpha;
    icvGetPieceLength3D(pointCorner, point1, &len1);
    if (len1 < EPS64D)
    {
        return CV_BADARG_ERR;
    }
    icvGetPieceLength3D(pointCorner, point2, &len2);
    alpha = len2 / len1; /* scale transferring point2's distance to the ray */

    pointSym2->x = pointCorner.x + alpha * (point1.x - pointCorner.x);
    pointSym2->y = pointCorner.y + alpha * (point1.y - pointCorner.y);
    pointSym2->z = pointCorner.z + alpha * (point1.z - pointCorner.z);
    return CV_NO_ERR;
}
594
595/* author Valery Mosyagin */
596
597/* Compute 3D point for scanline and alpha betta */
598int
599icvCompute3DPoint(double alpha, double betta, CvStereoLineCoeff* coeffs, CvPoint3D64d* point)
600{
601
602 double partX;
603 double partY;
604 double partZ;
605 double partAll;
606 double invPartAll;
607
608 double alphabetta = alpha * betta;
609
610 partAll = alpha - betta;
611 if (fabs(partAll) > 0.00001) /* alpha must be > betta */
612 {
613
614 partX = coeffs->Xcoef + coeffs->XcoefA * alpha + coeffs->XcoefB * betta +
615 coeffs->XcoefAB * alphabetta;
616
617 partY = coeffs->Ycoef + coeffs->YcoefA * alpha + coeffs->YcoefB * betta +
618 coeffs->YcoefAB * alphabetta;
619
620 partZ = coeffs->Zcoef + coeffs->ZcoefA * alpha + coeffs->ZcoefB * betta +
621 coeffs->ZcoefAB * alphabetta;
622
623 invPartAll = 1.0 / partAll;
624
625 point->x = partX * invPartAll;
626 point->y = partY * invPartAll;
627 point->z = partZ * invPartAll;
628 return CV_NO_ERR;
629 }
630 else
631 {
632 return CV_BADFACTOR_ERR;
633 }
634}
635
636/*--------------------------------------------------------------------------------------*/
637
/* Compute rotate matrix and trans vector for change system */
/* Builds the rigid transform taking camera-2 coordinates into camera-1:
 *   convRotMatr   = rotMatr1 * inv(rotMatr2)
 *   convTransVect = transVect1 - convRotMatr * transVect2
 * Matrices are 3x3 row-major doubles, vectors length 3. Returns CV_NO_ERR.
 * NOTE(review): extraction dropped the head line (likely
 * "icvCreateConvertMatrVect(CvMatr64d rotMatr1,") -- confirm. */
int
    CvMatr64d transVect1,
    CvMatr64d rotMatr2,
    CvMatr64d transVect2,
    CvMatr64d convRotMatr,
    CvMatr64d convTransVect)
{
    double invRotMatr2[9];
    double tmpVect[3];


    /* inv(rotMatr2) via SVD; degeneracy is not checked. */
    icvInvertMatrix_64d(rotMatr2, 3, invRotMatr2);
    /* Test for error */

    icvMulMatrix_64d(rotMatr1, 3, 3, invRotMatr2, 3, 3, convRotMatr);

    icvMulMatrix_64d(convRotMatr, 3, 3, transVect2, 1, 3, tmpVect);

    icvSubVector_64d(transVect1, tmpVect, convTransVect, 3);


    return CV_NO_ERR;
}
663
664/*--------------------------------------------------------------------------------------*/
665
/* Compute point coordinates in other system */
/* Applies the rigid transform to M2 and stores it in *M1:
 *   *M1 = rotMatr * M2 + transVect.  Returns CV_NO_ERR.
 * NOTE(review): extraction dropped the head line (likely
 * "icvConvertPointSystem(CvPoint3D64d M2, CvPoint3D64d* M1,
 *  CvMatr64d rotMatr, CvMatr64d transVect)") -- confirm. */
int
{
    double tmpVect[3];

    icvMulMatrix_64d(rotMatr, 3, 3, (double*)&M2, 1, 3, tmpVect);

    icvAddVector_64d(tmpVect, transVect, (double*)M1, 3);

    return CV_NO_ERR;
}
678
679/*--------------------------------------------------------------------------------------*/
680
682 CvPoint2D64d point2,
683 CvPoint2D64d point3,
684 CvPoint2D64d point4,
685 CvMatr64d camMatr1,
686 CvMatr64d rotMatr1,
687 CvMatr64d transVect1,
688 CvMatr64d camMatr2,
689 CvMatr64d rotMatr2,
690 CvMatr64d transVect2,
691 CvStereoLineCoeff* coeffs,
692 int* needSwapCamera);
693
694static int
695icvComputeCoeffForStereoV3(double quad1[4][2],
696 double quad2[4][2],
697 int numScanlines,
698 CvMatr64d camMatr1,
699 CvMatr64d rotMatr1,
700 CvMatr64d transVect1,
701 CvMatr64d camMatr2,
702 CvMatr64d rotMatr2,
703 CvMatr64d transVect2,
704 CvStereoLineCoeff* startCoeffs,
705 int* needSwapCamera)
706{
707 /* For each pair */
708 /* In this function we must define position of cameras */
709
710 CvPoint2D64d point1;
711 CvPoint2D64d point2;
712 CvPoint2D64d point3;
713 CvPoint2D64d point4;
714
715 int currLine;
716 *needSwapCamera = 0;
717 for (currLine = 0; currLine < numScanlines; currLine++)
718 {
719 /* Compute points */
720 double alpha = ((double)currLine) / ((double)(numScanlines)); /* maybe - 1 */
721
722 point1.x = (1.0 - alpha) * quad1[0][0] + alpha * quad1[3][0];
723 point1.y = (1.0 - alpha) * quad1[0][1] + alpha * quad1[3][1];
724
725 point2.x = (1.0 - alpha) * quad1[1][0] + alpha * quad1[2][0];
726 point2.y = (1.0 - alpha) * quad1[1][1] + alpha * quad1[2][1];
727
728 point3.x = (1.0 - alpha) * quad2[0][0] + alpha * quad2[3][0];
729 point3.y = (1.0 - alpha) * quad2[0][1] + alpha * quad2[3][1];
730
731 point4.x = (1.0 - alpha) * quad2[1][0] + alpha * quad2[2][0];
732 point4.y = (1.0 - alpha) * quad2[1][1] + alpha * quad2[2][1];
733
734 /* We can compute coeffs for this line */
735 icvComCoeffForLine(point1,
736 point2,
737 point3,
738 point4,
739 camMatr1,
740 rotMatr1,
741 transVect1,
742 camMatr2,
743 rotMatr2,
744 transVect2,
745 &startCoeffs[currLine],
746 needSwapCamera);
747 }
748 return CV_NO_ERR;
749}
750
751/*--------------------------------------------------------------------------------------*/
752
753static int icvCvt_32f_64d(float* src, double* dst, int size);
754
755static int
756icvComputeCoeffForStereoNew(double quad1[4][2],
757 double quad2[4][2],
758 int numScanlines,
759 CvMatr32f camMatr1,
760 CvMatr32f rotMatr1,
761 CvMatr32f transVect1,
762 CvMatr32f camMatr2,
763 CvStereoLineCoeff* startCoeffs,
764 int* needSwapCamera)
765{
766 /* Convert data */
767
768 double camMatr1_64d[9];
769 double camMatr2_64d[9];
770
771 double rotMatr1_64d[9];
772 double transVect1_64d[3];
773
774 double rotMatr2_64d[9];
775 double transVect2_64d[3];
776
777 icvCvt_32f_64d(camMatr1, camMatr1_64d, 9);
778 icvCvt_32f_64d(camMatr2, camMatr2_64d, 9);
779
780 icvCvt_32f_64d(rotMatr1, rotMatr1_64d, 9);
781 icvCvt_32f_64d(transVect1, transVect1_64d, 3);
782
783 rotMatr2_64d[0] = 1;
784 rotMatr2_64d[1] = 0;
785 rotMatr2_64d[2] = 0;
786 rotMatr2_64d[3] = 0;
787 rotMatr2_64d[4] = 1;
788 rotMatr2_64d[5] = 0;
789 rotMatr2_64d[6] = 0;
790 rotMatr2_64d[7] = 0;
791 rotMatr2_64d[8] = 1;
792
793 transVect2_64d[0] = 0;
794 transVect2_64d[1] = 0;
795 transVect2_64d[2] = 0;
796
797 int status = icvComputeCoeffForStereoV3(quad1,
798 quad2,
799 numScanlines,
800 camMatr1_64d,
801 rotMatr1_64d,
802 transVect1_64d,
803 camMatr2_64d,
804 rotMatr2_64d,
805 transVect2_64d,
806 startCoeffs,
807 needSwapCamera);
808
809
810 return status;
811}
812
813/*--------------------------------------------------------------------------------------*/
814int
815icvComputeCoeffForStereo(CvStereoCamera* stereoCamera)
816{
817 double quad1[4][2];
818 double quad2[4][2];
819
820 int i;
821 for (i = 0; i < 4; i++)
822 {
823 quad1[i][0] = stereoCamera->quad[0][i].x;
824 quad1[i][1] = stereoCamera->quad[0][i].y;
825
826 quad2[i][0] = stereoCamera->quad[1][i].x;
827 quad2[i][1] = stereoCamera->quad[1][i].y;
828 }
829
830 icvComputeCoeffForStereoNew(quad1,
831 quad2,
832 stereoCamera->warpSize.height,
833 stereoCamera->camera[0]->matrix,
834 stereoCamera->rotMatrix,
835 stereoCamera->transVector,
836 stereoCamera->camera[1]->matrix,
837 stereoCamera->lineCoeffs,
838 &(stereoCamera->needSwapCameras));
839 return CV_OK;
840}
841
843 CvPoint3D64d pointB,
844 CvPoint3D64d pointCam1,
845 double gamma,
846 CvStereoLineCoeff* coeffs);
847
849
851 CvPoint3D64d point12,
852 CvPoint3D64d point21,
853 CvPoint3D64d point22,
854 CvPoint3D64d* midPoint);
855
856/*--------------------------------------------------------------------------------------*/
/* Computes the stereo coefficients for one pair of corresponding epipolar
 * lines. point1/point2 bound the line in image 1, point3/point4 in image 2.
 * The image points are back-projected to viewing rays, camera 2 is
 * re-expressed in camera 1's frame, crossing points A, B, C are built from
 * the rays, and icvComputeStereoLineCoeffs is called with the length ratio
 * gamma = |BC| / |AB|. When the first crossing lands behind the camera
 * (z < 0) the alternate ray pairing is used and *needSwapCamera is set.
 * Returns CV_NO_ERR.
 * NOTE(review): extraction dropped the head line (likely
 * "icvComCoeffForLine(CvPoint2D64d point1,") -- confirm. */
int
    CvPoint2D64d point2,
    CvPoint2D64d point3,
    CvPoint2D64d point4,
    CvMatr64d camMatr1,
    CvMatr64d rotMatr1,
    CvMatr64d transVect1,
    CvMatr64d camMatr2,
    CvMatr64d rotMatr2,
    CvMatr64d transVect2,
    CvStereoLineCoeff* coeffs,
    int* needSwapCamera)
{
    /* Get direction for all points */
    /* Direction for camera 1 */

    CvPoint3D64f direct1;
    CvPoint3D64f direct2;
    CvPoint3D64f camPoint1;

    CvPoint3D64f directS3;
    CvPoint3D64f directS4;
    CvPoint3D64f direct3;
    CvPoint3D64f direct4;
    CvPoint3D64f camPoint2;

    icvGetDirectionForPoint(point1, camMatr1, &direct1);

    icvGetDirectionForPoint(point2, camMatr1, &direct2);

    /* Direction for camera 2 */

    icvGetDirectionForPoint(point3, camMatr2, &directS3);

    icvGetDirectionForPoint(point4, camMatr2, &directS4);

    /* Create conversion for camera 2: two direction and camera point */

    double convRotMatr[9];
    double convTransVect[3];

    /* NOTE(review): the call head (likely "icvCreateConvertMatrVect(")
       was lost in extraction on the next line. */
        rotMatr1, transVect1, rotMatr2, transVect2, convRotMatr, convTransVect);

    CvPoint3D64f zeroVect;
    zeroVect.x = zeroVect.y = zeroVect.z = 0.0;
    camPoint1.x = camPoint1.y = camPoint1.z = 0.0;

    /* Bring camera-2 rays and its center into camera 1's frame. */
    icvConvertPointSystem(directS3, &direct3, convRotMatr, convTransVect);
    icvConvertPointSystem(directS4, &direct4, convRotMatr, convTransVect);
    icvConvertPointSystem(zeroVect, &camPoint2, convRotMatr, convTransVect);

    CvPoint3D64f pointB;

    int postype = 0;

    /* Changed order */
    /* Compute point B: xB,yB,zB */
    icvGetCrossLines(camPoint1, direct2, camPoint2, direct3, &pointB);

    if (pointB.z < 0) /* If negative use other lines for cross */
    {
        postype = 1;
        icvGetCrossLines(camPoint1, direct1, camPoint2, direct4, &pointB);
    }

    CvPoint3D64d pointNewA;
    CvPoint3D64d pointNewC;

    pointNewA.x = pointNewA.y = pointNewA.z = 0;
    pointNewC.x = pointNewC.y = pointNewC.z = 0;

    if (postype == 0)
    {
        icvGetSymPoint3D(camPoint1, direct1, pointB, &pointNewA);

        icvGetSymPoint3D(camPoint2, direct4, pointB, &pointNewC);
    }
    else
    {
        /* In this case we must change cameras */
        *needSwapCamera = 1;
        icvGetSymPoint3D(camPoint2, direct3, pointB, &pointNewA);

        icvGetSymPoint3D(camPoint1, direct2, pointB, &pointNewC);
    }


    double gamma;

    double xA, yA, zA;
    double xB, yB, zB;
    double xC, yC, zC;

    xA = pointNewA.x;
    yA = pointNewA.y;
    zA = pointNewA.z;

    xB = pointB.x;
    yB = pointB.y;
    zB = pointB.z;

    xC = pointNewC.x;
    yC = pointNewC.y;
    zC = pointNewC.z;

    /* gamma = |B - C| / |A - B| */
    double len1, len2;
    len1 = sqrt((xA - xB) * (xA - xB) + (yA - yB) * (yA - yB) + (zA - zB) * (zA - zB));
    len2 = sqrt((xB - xC) * (xB - xC) + (yB - yC) * (yB - yC) + (zB - zC) * (zB - zC));
    gamma = len2 / len1;

    icvComputeStereoLineCoeffs(pointNewA, pointB, camPoint1, gamma, coeffs);

    return CV_NO_ERR;
}
973
974/*--------------------------------------------------------------------------------------*/
975
/* Back-projects an image point to a viewing-ray direction in camera
 * coordinates: direct = inv(camMatr) * (x, y, 1)^T. Returns CV_NO_ERR.
 * NOTE(review): extraction dropped the head line (likely
 * "icvGetDirectionForPoint(CvPoint2D64d point, CvMatr64d camMatr,
 *  CvPoint3D64d* direct)") -- confirm. */
int
{
    /* */
    double invMatr[9];

    /* Invert matrix */

    icvInvertMatrix_64d(camMatr, 3, invMatr);
    /* TEST FOR ERRORS */

    double vect[3];
    vect[0] = point.x;
    vect[1] = point.y;
    vect[2] = 1; /* homogeneous image coordinate */

    /* Mul matr */
    icvMulMatrix_64d(invMatr, 3, 3, vect, 1, 3, (double*)direct);

    return CV_NO_ERR;
}
997
998/*--------------------------------------------------------------------------------------*/
999
/* Finds the midpoint of the common perpendicular between the 3D lines
 * A->B (point11->point12) and C->D (point21->point22): the closest points
 * M (on AB) and N (on CD) are found by solving a 2x2 linear system via
 * Cramer's rule, and midPoint = (M + N) / 2. Returns CV_NO_ERR.
 * NOTE(review): extraction dropped the head line (likely
 * "icvGetCrossLines(CvPoint3D64d point11,"). Also note the near-parallel
 * case below: the error return is commented out, so the code proceeds to
 * divide by a near-zero delta -- pre-existing hazard, documented only. */
int
    CvPoint3D64d point12,
    CvPoint3D64d point21,
    CvPoint3D64d point22,
    CvPoint3D64d* midPoint)
{
    double xM, yM, zM; /* closest point on line A->B */
    double xN, yN, zN; /* closest point on line C->D */

    double xA, yA, zA;
    double xB, yB, zB;

    double xC, yC, zC;
    double xD, yD, zD;

    xA = point11.x;
    yA = point11.y;
    zA = point11.z;

    xB = point12.x;
    yB = point12.y;
    zB = point12.z;

    xC = point21.x;
    yC = point21.y;
    zC = point21.z;

    xD = point22.x;
    yD = point22.y;
    zD = point22.z;

    double a11, a12, a21, a22;
    double b1, b2;

    /* Normal equations minimizing |(A + alpha*(B-A)) - (C + betta*(D-C))|. */
    a11 = (xB - xA) * (xB - xA) + (yB - yA) * (yB - yA) + (zB - zA) * (zB - zA);
    a12 = -(xD - xC) * (xB - xA) - (yD - yC) * (yB - yA) - (zD - zC) * (zB - zA);
    a21 = (xB - xA) * (xD - xC) + (yB - yA) * (yD - yC) + (zB - zA) * (zD - zC);
    a22 = -(xD - xC) * (xD - xC) - (yD - yC) * (yD - yC) - (zD - zC) * (zD - zC);
    b1 = -((xA - xC) * (xB - xA) + (yA - yC) * (yB - yA) + (zA - zC) * (zB - zA));
    b2 = -((xA - xC) * (xD - xC) + (yA - yC) * (yD - yC) + (zA - zC) * (zD - zC));

    double delta;
    double deltaA, deltaB;
    double alpha, betta;

    delta = a11 * a22 - a12 * a21;

    if (fabs(delta) < EPS64D)
    {
        /*return ERROR;*/
        /* (near-parallel lines: falls through and divides by ~0 below) */
    }

    deltaA = b1 * a22 - b2 * a12;
    deltaB = a11 * b2 - b1 * a21;

    alpha = deltaA / delta;
    betta = deltaB / delta;

    xM = xA + alpha * (xB - xA);
    yM = yA + alpha * (yB - yA);
    zM = zA + alpha * (zB - zA);

    xN = xC + betta * (xD - xC);
    yN = yC + betta * (yD - yC);
    zN = zC + betta * (zD - zC);

    /* Compute middle point */
    midPoint->x = (xM + xN) * 0.5;
    midPoint->y = (yM + yN) * 0.5;
    midPoint->z = (zM + zN) * 0.5;

    return CV_NO_ERR;
}
1074
1075/*--------------------------------------------------------------------------------------*/
1076
/* Precomputes the bilinear coefficients (X/Y/Zcoef{,A,B,AB}) consumed by
 * icvCompute3DPoint for one scanline, from the scanline's 3D anchor
 * points A and B, the camera-1 center, and the length ratio gamma.
 * A negative gamma selects the mirrored parameterization: its magnitude
 * is used, the A and B coefficient roles are swapped, and all signs are
 * flipped. Returns CV_NO_ERR.
 * NOTE(review): extraction dropped the head line (likely
 * "icvComputeStereoLineCoeffs(CvPoint3D64d pointA,") -- confirm. */
int
    CvPoint3D64d pointB,
    CvPoint3D64d pointCam1,
    double gamma,
    CvStereoLineCoeff* coeffs)
{
    double x1, y1, z1;

    x1 = pointCam1.x;
    y1 = pointCam1.y;
    z1 = pointCam1.z;

    double xA, yA, zA;
    double xB, yB, zB;

    xA = pointA.x;
    yA = pointA.y;
    zA = pointA.z;

    xB = pointB.x;
    yB = pointB.y;
    zB = pointB.z;

    if (gamma > 0)
    {
        coeffs->Xcoef = -x1 + xA;
        coeffs->XcoefA = xB + x1 - xA;
        coeffs->XcoefB = -xA - gamma * x1 + gamma * xA;
        coeffs->XcoefAB = -xB + xA + gamma * xB - gamma * xA;

        coeffs->Ycoef = -y1 + yA;
        coeffs->YcoefA = yB + y1 - yA;
        coeffs->YcoefB = -yA - gamma * y1 + gamma * yA;
        coeffs->YcoefAB = -yB + yA + gamma * yB - gamma * yA;

        coeffs->Zcoef = -z1 + zA;
        coeffs->ZcoefA = zB + z1 - zA;
        coeffs->ZcoefB = -zA - gamma * z1 + gamma * zA;
        coeffs->ZcoefAB = -zB + zA + gamma * zB - gamma * zA;
    }
    else
    {
        /* Mirrored case: A/B coefficient roles swap and all signs flip. */
        gamma = -gamma;
        coeffs->Xcoef = -(-x1 + xA);
        coeffs->XcoefB = -(xB + x1 - xA);
        coeffs->XcoefA = -(-xA - gamma * x1 + gamma * xA);
        coeffs->XcoefAB = -(-xB + xA + gamma * xB - gamma * xA);

        coeffs->Ycoef = -(-y1 + yA);
        coeffs->YcoefB = -(yB + y1 - yA);
        coeffs->YcoefA = -(-yA - gamma * y1 + gamma * yA);
        coeffs->YcoefAB = -(-yB + yA + gamma * yB - gamma * yA);

        coeffs->Zcoef = -(-z1 + zA);
        coeffs->ZcoefB = -(zB + z1 - zA);
        coeffs->ZcoefA = -(-zA - gamma * z1 + gamma * zA);
        coeffs->ZcoefAB = -(-zB + zA + gamma * zB - gamma * zA);
    }


    return CV_NO_ERR;
}
1140
1141/*--------------------------------------------------------------------------------------*/
1142
1143
1144/*---------------------------------------------------------------------------------------*/
1145
1146/* This function get minimum angle started at point which contains rect */
/* For a start point outside the image rectangle, returns in point1/point2
 * the two image corners spanning the angular sector (as seen from
 * startPoint) that contains the whole image. The plane is treated as a
 * 3x3 grid of regions around the image (numbered 1..9, 5 = inside).
 * Returns 2 when startPoint lies inside the image (no sector exists),
 * 0 otherwise.
 * NOTE(review): extraction dropped the head line (likely
 * "icvGetAngleLine(CvPoint2D64d startPoint,") -- confirm. */
int
    CvSize imageSize,
    CvPoint2D64d* point1,
    CvPoint2D64d* point2)
{
    /* Get crosslines with image corners */

    /* Find four lines */

    /* The four image corners: a = top-left, b = top-right,
       c = bottom-left, d = bottom-right. */
    CvPoint2D64d pa, pb, pc, pd;

    pa.x = 0;
    pa.y = 0;

    pb.x = imageSize.width - 1;
    pb.y = 0;

    pd.x = imageSize.width - 1;
    pd.y = imageSize.height - 1;

    pc.x = 0;
    pc.y = imageSize.height - 1;

    /* We can compute points for angle */
    /* Test for place section */

    if (startPoint.x < 0)
    {
        /* 1,4,7 */
        if (startPoint.y < 0)
        {
            /* 1 */
            *point1 = pb;
            *point2 = pc;
        }
        else if (startPoint.y > imageSize.height - 1)
        {
            /* 7 */
            *point1 = pa;
            *point2 = pd;
        }
        else
        {
            /* 4 */
            *point1 = pa;
            *point2 = pc;
        }
    }
    else if (startPoint.x > imageSize.width - 1)
    {
        /* 3,6,9 */
        if (startPoint.y < 0)
        {
            /* 3 */
            *point1 = pa;
            *point2 = pd;
        }
        else if (startPoint.y > imageSize.height - 1)
        {
            /* 9 */
            *point1 = pb;
            *point2 = pc;
        }
        else
        {
            /* 6 */
            *point1 = pb;
            *point2 = pd;
        }
    }
    else
    {
        /* 2,5,8 */
        if (startPoint.y < 0)
        {
            /* 2 */
            /* Corner order depends on which half of the top edge the
               start point is over. */
            if (startPoint.x < imageSize.width / 2)
            {
                *point1 = pb;
                *point2 = pa;
            }
            else
            {
                *point1 = pa;
                *point2 = pb;
            }
        }
        else if (startPoint.y > imageSize.height - 1)
        {
            /* 8 */
            if (startPoint.x < imageSize.width / 2)
            {
                *point1 = pc;
                *point2 = pd;
            }
            else
            {
                *point1 = pd;
                *point2 = pc;
            }
        }
        else
        {
            /* 5 - point in the image */
            return 2;
        }
    }
    return 0;
} /* GetAngleLine */
1257
1258/*---------------------------------------------------------------------------------------*/
1259
/* Computes normalized coefficients (a, b, c) of the line a*x + b*y + c = 0
 * through p_start and p_end via Cramer's rule. *result is set to 1 on
 * success, 0 when the system is degenerate (|det| < EPS64D), in which
 * case a/b/c are left untouched.
 * NOTE(review): extraction dropped the head line (likely
 * "icvGetCoefForPiece(CvPoint2D64d p_start,") -- confirm. */
void
    CvPoint2D64d p_end,
    double* a,
    double* b,
    double* c,
    int* result)
{
    double det;
    double detA, detB, detC;

    /* Determinant of the 3x3 system normalizing the line coefficients. */
    det = p_start.x * p_end.y + p_end.x + p_start.y - p_end.y - p_start.y * p_end.x - p_start.x;
    if (fabs(det) < EPS64D) /* Error */
    {
        *result = 0;
        return;
    }

    detA = p_start.y - p_end.y;
    detB = p_end.x - p_start.x;
    detC = p_start.x * p_end.y - p_end.x * p_start.y;

    double invDet = 1.0 / det;
    *a = detA * invDet;
    *b = detB * invDet;
    *c = detC * invDet;

    *result = 1;
    return;
}
1290
1291/*---------------------------------------------------------------------------------------*/
1292
1293#define icvTransformVector_64d(matr, src, dst, w, h) icvMulMatrix_64d(matr, w, h, src, 1, w, dst)
1294
1295void icvGetCrossRectDirect(CvSize imageSize,
1296 double a,
1297 double b,
1298 double c,
1299 CvPoint2D64d* start,
1300 CvPoint2D64d* end,
1301 int* result);
1302
1303/* Get common area of rectifying */
1304static void
1305icvGetCommonArea(CvSize imageSize,
1306 CvPoint3D64d epipole1,
1307 CvPoint3D64d epipole2,
1308 CvMatr64d fundMatr,
1309 CvVect64d coeff11,
1310 CvVect64d coeff12,
1311 CvVect64d coeff21,
1312 CvVect64d coeff22,
1313 int* result)
1314{
1315 int res = 0;
1316 CvPoint2D64d point11;
1317 CvPoint2D64d point12;
1318 CvPoint2D64d point21;
1319 CvPoint2D64d point22;
1320
1321 double corr11[3];
1322 double corr12[3];
1323 double corr21[3];
1324 double corr22[3];
1325
1326 double pointW11[3];
1327 double pointW12[3];
1328 double pointW21[3];
1329 double pointW22[3];
1330
1331 double transFundMatr[3 * 3];
1332 /* Compute transpose of fundamental matrix */
1333 icvTransposeMatrix_64d(fundMatr, 3, 3, transFundMatr);
1334
1335 CvPoint2D64d epipole1_2d;
1336 CvPoint2D64d epipole2_2d;
1337
1338 if (fabs(epipole1.z) < 1e-8)
1339 {
1340 /* epipole1 in infinity */
1341 *result = 0;
1342 return;
1343 }
1344 epipole1_2d.x = epipole1.x / epipole1.z;
1345 epipole1_2d.y = epipole1.y / epipole1.z;
1346
1347 if (fabs(epipole2.z) < 1e-8)
1348 {
1349 /* epipole2 in infinity */
1350 *result = 0;
1351 return;
1352 }
1353 epipole2_2d.x = epipole2.x / epipole2.z;
1354 epipole2_2d.y = epipole2.y / epipole2.z;
1355
1356 int stat = icvGetAngleLine(epipole1_2d, imageSize, &point11, &point12);
1357 if (stat == 2)
1358 {
1359 /* No angle */
1360 *result = 0;
1361 return;
1362 }
1363
1364 stat = icvGetAngleLine(epipole2_2d, imageSize, &point21, &point22);
1365 if (stat == 2)
1366 {
1367 /* No angle */
1368 *result = 0;
1369 return;
1370 }
1371
1372 /* ============= Computation for line 1 ================ */
1373 /* Find correspondence line for angle points11 */
1374 /* corr21 = Fund'*p1 */
1375
1376 pointW11[0] = point11.x;
1377 pointW11[1] = point11.y;
1378 pointW11[2] = 1.0;
1379
1380 icvTransformVector_64d(transFundMatr, /* !!! Modified from not transposed */
1381 pointW11,
1382 corr21,
1383 3,
1384 3);
1385
1386 /* Find crossing of line with image 2 */
1387 CvPoint2D64d start;
1388 CvPoint2D64d end;
1389 icvGetCrossRectDirect(imageSize, corr21[0], corr21[1], corr21[2], &start, &end, &res);
1390
1391 if (res == 0)
1392 {
1393 /* We have not cross */
1394 /* We must define new angle */
1395
1396 pointW21[0] = point21.x;
1397 pointW21[1] = point21.y;
1398 pointW21[2] = 1.0;
1399
1400 /* Find correspondence line for this angle points */
1401 /* We know point and try to get corr line */
1402 /* For point21 */
1403 /* corr11 = Fund * p21 */
1404
1405 icvTransformVector_64d(fundMatr, /* !!! Modified */
1406 pointW21,
1407 corr11,
1408 3,
1409 3);
1410
1411 /* We have cross. And it's result cross for up line. Set result coefs */
1412
1413 /* Set coefs for line 1 image 1 */
1414 coeff11[0] = corr11[0];
1415 coeff11[1] = corr11[1];
1416 coeff11[2] = corr11[2];
1417
1418 /* Set coefs for line 1 image 2 */
1419 icvGetCoefForPiece(epipole2_2d, point21, &coeff21[0], &coeff21[1], &coeff21[2], &res);
1420 if (res == 0)
1421 {
1422 *result = 0;
1423 return; /* Error */
1424 }
1425 }
1426 else
1427 {
1428 /* Line 1 cross image 2 */
1429 /* Set coefs for line 1 image 1 */
1430 icvGetCoefForPiece(epipole1_2d, point11, &coeff11[0], &coeff11[1], &coeff11[2], &res);
1431 if (res == 0)
1432 {
1433 *result = 0;
1434 return; /* Error */
1435 }
1436
1437 /* Set coefs for line 1 image 2 */
1438 coeff21[0] = corr21[0];
1439 coeff21[1] = corr21[1];
1440 coeff21[2] = corr21[2];
1441 }
1442
1443 /* ============= Computation for line 2 ================ */
1444 /* Find correspondence line for angle points11 */
1445 /* corr22 = Fund*p2 */
1446
1447 pointW12[0] = point12.x;
1448 pointW12[1] = point12.y;
1449 pointW12[2] = 1.0;
1450
1451 icvTransformVector_64d(transFundMatr, pointW12, corr22, 3, 3);
1452
1453 /* Find crossing of line with image 2 */
1454 icvGetCrossRectDirect(imageSize, corr22[0], corr22[1], corr22[2], &start, &end, &res);
1455
1456 if (res == 0)
1457 {
1458 /* We have not cross */
1459 /* We must define new angle */
1460
1461 pointW22[0] = point22.x;
1462 pointW22[1] = point22.y;
1463 pointW22[2] = 1.0;
1464
1465 /* Find correspondence line for this angle points */
1466 /* We know point and try to get corr line */
1467 /* For point21 */
1468 /* corr2 = Fund' * p1 */
1469
1470 icvTransformVector_64d(fundMatr, pointW22, corr12, 3, 3);
1471
1472
1473 /* We have cross. And it's result cross for down line. Set result coefs */
1474
1475 /* Set coefs for line 2 image 1 */
1476 coeff12[0] = corr12[0];
1477 coeff12[1] = corr12[1];
1478 coeff12[2] = corr12[2];
1479
1480 /* Set coefs for line 1 image 2 */
1481 icvGetCoefForPiece(epipole2_2d, point22, &coeff22[0], &coeff22[1], &coeff22[2], &res);
1482 if (res == 0)
1483 {
1484 *result = 0;
1485 return; /* Error */
1486 }
1487 }
1488 else
1489 {
1490 /* Line 2 cross image 2 */
1491 /* Set coefs for line 2 image 1 */
1492 icvGetCoefForPiece(epipole1_2d, point12, &coeff12[0], &coeff12[1], &coeff12[2], &res);
1493 if (res == 0)
1494 {
1495 *result = 0;
1496 return; /* Error */
1497 }
1498
1499 /* Set coefs for line 1 image 2 */
1500 coeff22[0] = corr22[0];
1501 coeff22[1] = corr22[1];
1502 coeff22[2] = corr22[2];
1503 }
1504
1505 /* Now we know common area */
1506
1507 return;
1508
1509} /* GetCommonArea */
1510
1511/*---------------------------------------------------------------------------------------*/
1512
1513/* Get cross for direction1 and direction2 */
1514/* Result = 1 - cross */
1515/* Result = 2 - parallel and not equal */
1516/* Result = 3 - parallel and equal */
1517
1518void
1520{
1521 double det = direct1[0] * direct2[1] - direct2[0] * direct1[1];
1522 double detx = -direct1[2] * direct2[1] + direct1[1] * direct2[2];
1523
1524 if (fabs(det) > EPS64D)
1525 {
1526 /* Have cross */
1527 cross->x = detx / det;
1528 cross->y = (-direct1[0] * direct2[2] + direct2[0] * direct1[2]) / det;
1529 *result = 1;
1530 }
1531 else
1532 {
1533 /* may be parallel */
1534 if (fabs(detx) > EPS64D)
1535 {
1536 /* parallel and not equal */
1537 *result = 2;
1538 }
1539 else
1540 {
1541 /* equals */
1542 *result = 3;
1543 }
1544 }
1545
1546 return;
1547}
1548
1549/*---------------------------------------------------------------------------------------*/
1550
1551/* Get cross for piece p1,p2 and direction a,b,c */
1552/* Result = 0 - no cross */
1553/* Result = 1 - cross */
1554/* Result = 2 - parallel and not equal */
1555/* Result = 3 - parallel and equal */
1556
1557void
1559 CvPoint2D64d p_end,
1560 double a,
1561 double b,
1562 double c,
1564 int* result)
1565{
1566
1567 if ((a * p_start.x + b * p_start.y + c) * (a * p_end.x + b * p_end.y + c) <= 0)
1568 {
1569 /* Have cross */
1570 double det;
1571 double detxc, detyc;
1572
1573 det = a * (p_end.x - p_start.x) + b * (p_end.y - p_start.y);
1574
1575 if (fabs(det) < EPS64D)
1576 {
1577 /* lines are parallel and may be equal or line is point */
1578 if (fabs(a * p_start.x + b * p_start.y + c) < EPS64D)
1579 {
1580 /* line is point or not diff */
1581 *result = 3;
1582 return;
1583 }
1584 else
1585 {
1586 *result = 2;
1587 }
1588 return;
1589 }
1590
1591 detxc = b * (p_end.y * p_start.x - p_start.y * p_end.x) + c * (p_start.x - p_end.x);
1592 detyc = a * (p_end.x * p_start.y - p_start.x * p_end.y) + c * (p_start.y - p_end.y);
1593
1594 cross->x = detxc / det;
1595 cross->y = detyc / det;
1596 *result = 1;
1597 }
1598 else
1599 {
1600 *result = 0;
1601 }
1602 return;
1603}
1604
1605/*--------------------------------------------------------------------------------------*/
1606
1607void
1609 CvPoint2D64d p1_end,
1610 CvPoint2D64d p2_start,
1611 CvPoint2D64d p2_end,
1613 int* result)
1614{
1615 double ex1, ey1, ex2, ey2;
1616 double px1, py1, px2, py2;
1617 double del;
1618 double delA, delB, delX, delY;
1619 double alpha, betta;
1620
1621 ex1 = p1_start.x;
1622 ey1 = p1_start.y;
1623 ex2 = p1_end.x;
1624 ey2 = p1_end.y;
1625
1626 px1 = p2_start.x;
1627 py1 = p2_start.y;
1628 px2 = p2_end.x;
1629 py2 = p2_end.y;
1630
1631 del = (py1 - py2) * (ex1 - ex2) - (px1 - px2) * (ey1 - ey2);
1632 if (fabs(del) <= EPS64D)
1633 {
1634 /* May be they are parallel !!! */
1635 *result = 0;
1636 return;
1637 }
1638
1639 delA = (ey1 - ey2) * (ex1 - px1) + (ex1 - ex2) * (py1 - ey1);
1640 delB = (py1 - py2) * (ex1 - px1) + (px1 - px2) * (py1 - ey1);
1641
1642 alpha = delA / del;
1643 betta = delB / del;
1644
1645 if (alpha < 0 || alpha > 1.0 || betta < 0 || betta > 1.0)
1646 {
1647 *result = 0;
1648 return;
1649 }
1650
1651 delX = (px1 - px2) * (ey1 * (ex1 - ex2) - ex1 * (ey1 - ey2)) +
1652 (ex1 - ex2) * (px1 * (py1 - py2) - py1 * (px1 - px2));
1653
1654 delY = (py1 - py2) * (ey1 * (ex1 - ex2) - ex1 * (ey1 - ey2)) +
1655 (ey1 - ey2) * (px1 * (py1 - py2) - py1 * (px1 - px2));
1656
1657 cross->x = delX / del;
1658 cross->y = delY / del;
1659
1660 *result = 1;
1661 return;
1662}
1663
1664/*---------------------------------------------------------------------------------------*/
1665
1666void
1667icvGetPieceLength(CvPoint2D64d point1, CvPoint2D64d point2, double* dist)
1668{
1669 double dx = point2.x - point1.x;
1670 double dy = point2.y - point1.y;
1671 *dist = sqrt(dx * dx + dy * dy);
1672 return;
1673}
1674
1675/*---------------------------------------------------------------------------------------*/
1676
1677void
1678icvGetPieceLength3D(CvPoint3D64d point1, CvPoint3D64d point2, double* dist)
1679{
1680 double dx = point2.x - point1.x;
1681 double dy = point2.y - point1.y;
1682 double dz = point2.z - point1.z;
1683 *dist = sqrt(dx * dx + dy * dy + dz * dz);
1684 return;
1685}
1686
1687/*---------------------------------------------------------------------------------------*/
1688
1689/* Find line from epipole which cross image rect */
1690/* Find points of cross 0 or 1 or 2. Return number of points in cross */
1691void
1692icvGetCrossRectDirect(CvSize imageSize,
1693 double a,
1694 double b,
1695 double c,
1696 CvPoint2D64d* start,
1697 CvPoint2D64d* end,
1698 int* result)
1699{
1700 CvPoint2D64d frameBeg;
1701 CvPoint2D64d frameEnd;
1703 int haveCross[4];
1704
1705 haveCross[0] = 0;
1706 haveCross[1] = 0;
1707 haveCross[2] = 0;
1708 haveCross[3] = 0;
1709
1710 frameBeg.x = 0;
1711 frameBeg.y = 0;
1712 frameEnd.x = imageSize.width;
1713 frameEnd.y = 0;
1714
1715 icvGetCrossPieceDirect(frameBeg, frameEnd, a, b, c, &cross[0], &haveCross[0]);
1716
1717 frameBeg.x = imageSize.width;
1718 frameBeg.y = 0;
1719 frameEnd.x = imageSize.width;
1720 frameEnd.y = imageSize.height;
1721 icvGetCrossPieceDirect(frameBeg, frameEnd, a, b, c, &cross[1], &haveCross[1]);
1722
1723 frameBeg.x = imageSize.width;
1724 frameBeg.y = imageSize.height;
1725 frameEnd.x = 0;
1726 frameEnd.y = imageSize.height;
1727 icvGetCrossPieceDirect(frameBeg, frameEnd, a, b, c, &cross[2], &haveCross[2]);
1728
1729 frameBeg.x = 0;
1730 frameBeg.y = imageSize.height;
1731 frameEnd.x = 0;
1732 frameEnd.y = 0;
1733 icvGetCrossPieceDirect(frameBeg, frameEnd, a, b, c, &cross[3], &haveCross[3]);
1734
1735 double maxDist;
1736
1737 int maxI = 0, maxJ = 0;
1738
1739
1740 int i, j;
1741
1742 maxDist = -1.0;
1743
1744 double distance;
1745
1746 for (i = 0; i < 3; i++)
1747 {
1748 if (haveCross[i] == 1)
1749 {
1750 for (j = i + 1; j < 4; j++)
1751 {
1752 if (haveCross[j] == 1)
1753 {
1754 /* Compute dist */
1756 if (distance > maxDist)
1757 {
1758 maxI = i;
1759 maxJ = j;
1760 maxDist = distance;
1761 }
1762 }
1763 }
1764 }
1765 }
1766
1767 if (maxDist >= 0)
1768 {
1769 /* We have cross */
1770 *start = cross[maxI];
1771 *result = 1;
1772 if (maxDist > 0)
1773 {
1774 *end = cross[maxJ];
1775 *result = 2;
1776 }
1777 }
1778 else
1779 {
1780 *result = 0;
1781 }
1782
1783 return;
1784} /* GetCrossRectDirect */
1785
1786/*---------------------------------------------------------------------------------------*/
1787void
1789 CvMatr64d camMatr,
1790 CvMatr64d rotMatr,
1791 CvVect64d transVect,
1792 CvPoint2D64d* projPoint)
1793{
1794
1795 double tmpVect1[3];
1796 double tmpVect2[3];
1797
1798 icvMulMatrix_64d(rotMatr, 3, 3, (double*)&point, 1, 3, tmpVect1);
1799
1800 icvAddVector_64d(tmpVect1, transVect, tmpVect2, 3);
1801
1802 icvMulMatrix_64d(camMatr, 3, 3, tmpVect2, 1, 3, tmpVect1);
1803
1804 projPoint->x = tmpVect1[0] / tmpVect1[2];
1805 projPoint->y = tmpVect1[1] / tmpVect1[2];
1806
1807 return;
1808}
1809
/* Forward declaration: element-wise double -> float conversion of `size` values. */
static int icvCvt_64d_32f(double* src, float* dst, int size);
1811
1812void icvGetCutPiece(CvVect64d areaLineCoef1,
1813 CvVect64d areaLineCoef2,
1814 CvPoint2D64d epipole,
1815 CvSize imageSize,
1816 CvPoint2D64d* point11,
1817 CvPoint2D64d* point12,
1818 CvPoint2D64d* point21,
1819 CvPoint2D64d* point22,
1820 int* result);
1821
1823 CvMatr64d camMatr1,
1824 CvMatr64d camMatr2,
1825 CvPoint2D32f point1,
1826 CvPoint2D32f* point2);
1827
1829 CvMatr64d camMatr1,
1830 CvMatr64d camMatr2,
1831 CvPoint2D32f* point1,
1832 CvPoint2D32f point2);
1833
1834/*---------------------------------------------------------------------------------------*/
1835/* Get quads for transform images */
1836void
1837icvGetQuadsTransform(CvSize imageSize,
1838 CvMatr64d camMatr1,
1839 CvMatr64d rotMatr1,
1840 CvVect64d transVect1,
1841 CvMatr64d camMatr2,
1842 CvMatr64d rotMatr2,
1843 CvVect64d transVect2,
1844 CvSize* warpSize,
1845 double quad1[4][2],
1846 double quad2[4][2],
1847 CvMatr64d fundMatr,
1848 CvPoint3D64d* epipole1,
1849 CvPoint3D64d* epipole2)
1850{
1851 /* First compute fundamental matrix and epipoles */
1852 int res;
1853
1854
1855 /* Compute epipoles and fundamental matrix using new functions */
1856 {
1857 double convRotMatr[9];
1858 double convTransVect[3];
1859
1861 rotMatr1, transVect1, rotMatr2, transVect2, convRotMatr, convTransVect);
1862 float convRotMatr_32f[9];
1863 float convTransVect_32f[3];
1864
1865 icvCvt_64d_32f(convRotMatr, convRotMatr_32f, 9);
1866 icvCvt_64d_32f(convTransVect, convTransVect_32f, 3);
1867
1868 /* We know R and t */
1869 /* Compute essential matrix */
1870 float essMatr[9];
1871 float fundMatr_32f[9];
1872
1873 float camMatr1_32f[9];
1874 float camMatr2_32f[9];
1875
1876 icvCvt_64d_32f(camMatr1, camMatr1_32f, 9);
1877 icvCvt_64d_32f(camMatr2, camMatr2_32f, 9);
1878
1879 cvComputeEssentialMatrix(convRotMatr_32f, convTransVect_32f, essMatr);
1880
1881 cvConvertEssential2Fundamental(essMatr, fundMatr_32f, camMatr1_32f, camMatr2_32f);
1882
1883 CvPoint3D32f epipole1_32f;
1884 CvPoint3D32f epipole2_32f;
1885
1886 cvComputeEpipolesFromFundMatrix(fundMatr_32f, &epipole1_32f, &epipole2_32f);
1887 /* copy to 64d epipoles */
1888 epipole1->x = epipole1_32f.x;
1889 epipole1->y = epipole1_32f.y;
1890 epipole1->z = epipole1_32f.z;
1891
1892 epipole2->x = epipole2_32f.x;
1893 epipole2->y = epipole2_32f.y;
1894 epipole2->z = epipole2_32f.z;
1895
1896 /* Convert fundamental matrix */
1897 icvCvt_32f_64d(fundMatr_32f, fundMatr, 9);
1898 }
1899
1900 double coeff11[3];
1901 double coeff12[3];
1902 double coeff21[3];
1903 double coeff22[3];
1904
1905 icvGetCommonArea(
1906 imageSize, *epipole1, *epipole2, fundMatr, coeff11, coeff12, coeff21, coeff22, &res);
1907
1908 CvPoint2D64d point11, point12, point21, point22;
1909 double width1, width2;
1910 double height1, height2;
1911 double tmpHeight1, tmpHeight2;
1912
1913 CvPoint2D64d epipole1_2d;
1914 CvPoint2D64d epipole2_2d;
1915
1916 /* ----- Image 1 ----- */
1917 if (fabs(epipole1->z) < 1e-8)
1918 {
1919 return;
1920 }
1921 epipole1_2d.x = epipole1->x / epipole1->z;
1922 epipole1_2d.y = epipole1->y / epipole1->z;
1923
1925 coeff11, coeff12, epipole1_2d, imageSize, &point11, &point12, &point21, &point22, &res);
1926
1927 /* Compute distance */
1928 icvGetPieceLength(point11, point21, &width1);
1929 icvGetPieceLength(point11, point12, &tmpHeight1);
1930 icvGetPieceLength(point21, point22, &tmpHeight2);
1931 height1 = MAX(tmpHeight1, tmpHeight2);
1932
1933 quad1[0][0] = point11.x;
1934 quad1[0][1] = point11.y;
1935
1936 quad1[1][0] = point21.x;
1937 quad1[1][1] = point21.y;
1938
1939 quad1[2][0] = point22.x;
1940 quad1[2][1] = point22.y;
1941
1942 quad1[3][0] = point12.x;
1943 quad1[3][1] = point12.y;
1944
1945 /* ----- Image 2 ----- */
1946 if (fabs(epipole2->z) < 1e-8)
1947 {
1948 return;
1949 }
1950 epipole2_2d.x = epipole2->x / epipole2->z;
1951 epipole2_2d.y = epipole2->y / epipole2->z;
1952
1954 coeff21, coeff22, epipole2_2d, imageSize, &point11, &point12, &point21, &point22, &res);
1955
1956 /* Compute distance */
1957 icvGetPieceLength(point11, point21, &width2);
1958 icvGetPieceLength(point11, point12, &tmpHeight1);
1959 icvGetPieceLength(point21, point22, &tmpHeight2);
1960 height2 = MAX(tmpHeight1, tmpHeight2);
1961
1962 quad2[0][0] = point11.x;
1963 quad2[0][1] = point11.y;
1964
1965 quad2[1][0] = point21.x;
1966 quad2[1][1] = point21.y;
1967
1968 quad2[2][0] = point22.x;
1969 quad2[2][1] = point22.y;
1970
1971 quad2[3][0] = point12.x;
1972 quad2[3][1] = point12.y;
1973
1974
1975 /*=======================================================*/
1976 /* This is a new additional way to compute quads. */
1977 /* We must correct quads */
1978 {
1979 double convRotMatr[9];
1980 double convTransVect[3];
1981
1982 double newQuad1[4][2];
1983 double newQuad2[4][2];
1984
1985
1987 rotMatr1, transVect1, rotMatr2, transVect2, convRotMatr, convTransVect);
1988
1989 /* -------------Compute for first image-------------- */
1990 CvPoint2D32f pointb1;
1991 CvPoint2D32f pointe1;
1992
1993 CvPoint2D32f pointb2;
1994 CvPoint2D32f pointe2;
1995
1996 pointb1.x = (float)quad1[0][0];
1997 pointb1.y = (float)quad1[0][1];
1998
1999 pointe1.x = (float)quad1[3][0];
2000 pointe1.y = (float)quad1[3][1];
2001
2002 icvComputeeInfiniteProject1(convRotMatr, camMatr1, camMatr2, pointb1, &pointb2);
2003
2004 icvComputeeInfiniteProject1(convRotMatr, camMatr1, camMatr2, pointe1, &pointe2);
2005
2006 /* JUST TEST FOR POINT */
2007
2008 /* Compute distances */
2009 double dxOld, dyOld;
2010 double dxNew, dyNew;
2011 double distOld, distNew;
2012
2013 dxOld = quad2[1][0] - quad2[0][0];
2014 dyOld = quad2[1][1] - quad2[0][1];
2015 distOld = dxOld * dxOld + dyOld * dyOld;
2016
2017 dxNew = quad2[1][0] - pointb2.x;
2018 dyNew = quad2[1][1] - pointb2.y;
2019 distNew = dxNew * dxNew + dyNew * dyNew;
2020
2021 if (distNew > distOld)
2022 {
2023 /* Get new points for second quad */
2024 newQuad2[0][0] = pointb2.x;
2025 newQuad2[0][1] = pointb2.y;
2026 newQuad2[3][0] = pointe2.x;
2027 newQuad2[3][1] = pointe2.y;
2028 newQuad1[0][0] = quad1[0][0];
2029 newQuad1[0][1] = quad1[0][1];
2030 newQuad1[3][0] = quad1[3][0];
2031 newQuad1[3][1] = quad1[3][1];
2032 }
2033 else
2034 {
2035 /* Get new points for first quad */
2036
2037 pointb2.x = (float)quad2[0][0];
2038 pointb2.y = (float)quad2[0][1];
2039
2040 pointe2.x = (float)quad2[3][0];
2041 pointe2.y = (float)quad2[3][1];
2042
2043 icvComputeeInfiniteProject2(convRotMatr, camMatr1, camMatr2, &pointb1, pointb2);
2044
2045 icvComputeeInfiniteProject2(convRotMatr, camMatr1, camMatr2, &pointe1, pointe2);
2046
2047
2048 /* JUST TEST FOR POINT */
2049
2050 newQuad2[0][0] = quad2[0][0];
2051 newQuad2[0][1] = quad2[0][1];
2052 newQuad2[3][0] = quad2[3][0];
2053 newQuad2[3][1] = quad2[3][1];
2054
2055 newQuad1[0][0] = pointb1.x;
2056 newQuad1[0][1] = pointb1.y;
2057 newQuad1[3][0] = pointe1.x;
2058 newQuad1[3][1] = pointe1.y;
2059 }
2060
2061 /* -------------Compute for second image-------------- */
2062 pointb1.x = (float)quad1[1][0];
2063 pointb1.y = (float)quad1[1][1];
2064
2065 pointe1.x = (float)quad1[2][0];
2066 pointe1.y = (float)quad1[2][1];
2067
2068 icvComputeeInfiniteProject1(convRotMatr, camMatr1, camMatr2, pointb1, &pointb2);
2069
2070 icvComputeeInfiniteProject1(convRotMatr, camMatr1, camMatr2, pointe1, &pointe2);
2071
2072 /* Compute distances */
2073
2074 dxOld = quad2[0][0] - quad2[1][0];
2075 dyOld = quad2[0][1] - quad2[1][1];
2076 distOld = dxOld * dxOld + dyOld * dyOld;
2077
2078 dxNew = quad2[0][0] - pointb2.x;
2079 dyNew = quad2[0][1] - pointb2.y;
2080 distNew = dxNew * dxNew + dyNew * dyNew;
2081
2082 if (distNew > distOld)
2083 {
2084 /* Get new points for second quad */
2085 newQuad2[1][0] = pointb2.x;
2086 newQuad2[1][1] = pointb2.y;
2087 newQuad2[2][0] = pointe2.x;
2088 newQuad2[2][1] = pointe2.y;
2089 newQuad1[1][0] = quad1[1][0];
2090 newQuad1[1][1] = quad1[1][1];
2091 newQuad1[2][0] = quad1[2][0];
2092 newQuad1[2][1] = quad1[2][1];
2093 }
2094 else
2095 {
2096 /* Get new points for first quad */
2097
2098 pointb2.x = (float)quad2[1][0];
2099 pointb2.y = (float)quad2[1][1];
2100
2101 pointe2.x = (float)quad2[2][0];
2102 pointe2.y = (float)quad2[2][1];
2103
2104 icvComputeeInfiniteProject2(convRotMatr, camMatr1, camMatr2, &pointb1, pointb2);
2105
2106 icvComputeeInfiniteProject2(convRotMatr, camMatr1, camMatr2, &pointe1, pointe2);
2107
2108 newQuad2[1][0] = quad2[1][0];
2109 newQuad2[1][1] = quad2[1][1];
2110 newQuad2[2][0] = quad2[2][0];
2111 newQuad2[2][1] = quad2[2][1];
2112
2113 newQuad1[1][0] = pointb1.x;
2114 newQuad1[1][1] = pointb1.y;
2115 newQuad1[2][0] = pointe1.x;
2116 newQuad1[2][1] = pointe1.y;
2117 }
2118
2119
2120 /*-------------------------------------------------------------------------------*/
2121
2122 /* Copy new quads to old quad */
2123 int i;
2124 for (i = 0; i < 4; i++)
2125 {
2126 {
2127 quad1[i][0] = newQuad1[i][0];
2128 quad1[i][1] = newQuad1[i][1];
2129 quad2[i][0] = newQuad2[i][0];
2130 quad2[i][1] = newQuad2[i][1];
2131 }
2132 }
2133 }
2134 /*=======================================================*/
2135
2136 double warpWidth, warpHeight;
2137
2138 warpWidth = MAX(width1, width2);
2139 warpHeight = MAX(height1, height2);
2140
2141 warpSize->width = (int)warpWidth;
2142 warpSize->height = (int)warpHeight;
2143
2144 warpSize->width = cvRound(warpWidth - 1);
2145 warpSize->height = cvRound(warpHeight - 1);
2146
2147 /* !!! by Valery Mosyagin. this lines added just for test no warp */
2148 warpSize->width = imageSize.width;
2149 warpSize->height = imageSize.height;
2150
2151 return;
2152}
2153
2154/*---------------------------------------------------------------------------------------*/
2155
2156static void
2157icvGetQuadsTransformNew(CvSize imageSize,
2158 CvMatr32f camMatr1,
2159 CvMatr32f camMatr2,
2160 CvMatr32f rotMatr1,
2161 CvVect32f transVect1,
2162 CvSize* warpSize,
2163 double quad1[4][2],
2164 double quad2[4][2],
2165 CvMatr32f fundMatr,
2166 CvPoint3D32f* epipole1,
2167 CvPoint3D32f* epipole2)
2168{
2169 /* Convert data */
2170 /* Convert camera matrix */
2171 double camMatr1_64d[9];
2172 double camMatr2_64d[9];
2173 double rotMatr1_64d[9];
2174 double transVect1_64d[3];
2175 double rotMatr2_64d[9];
2176 double transVect2_64d[3];
2177 double fundMatr_64d[9];
2178 CvPoint3D64d epipole1_64d;
2179 CvPoint3D64d epipole2_64d;
2180
2181 icvCvt_32f_64d(camMatr1, camMatr1_64d, 9);
2182 icvCvt_32f_64d(camMatr2, camMatr2_64d, 9);
2183 icvCvt_32f_64d(rotMatr1, rotMatr1_64d, 9);
2184 icvCvt_32f_64d(transVect1, transVect1_64d, 3);
2185
2186 /* Create vector and matrix */
2187
2188 rotMatr2_64d[0] = 1;
2189 rotMatr2_64d[1] = 0;
2190 rotMatr2_64d[2] = 0;
2191 rotMatr2_64d[3] = 0;
2192 rotMatr2_64d[4] = 1;
2193 rotMatr2_64d[5] = 0;
2194 rotMatr2_64d[6] = 0;
2195 rotMatr2_64d[7] = 0;
2196 rotMatr2_64d[8] = 1;
2197
2198 transVect2_64d[0] = 0;
2199 transVect2_64d[1] = 0;
2200 transVect2_64d[2] = 0;
2201
2202 icvGetQuadsTransform(imageSize,
2203 camMatr1_64d,
2204 rotMatr1_64d,
2205 transVect1_64d,
2206 camMatr2_64d,
2207 rotMatr2_64d,
2208 transVect2_64d,
2209 warpSize,
2210 quad1,
2211 quad2,
2212 fundMatr_64d,
2213 &epipole1_64d,
2214 &epipole2_64d);
2215
2216 /* Convert epipoles */
2217 epipole1->x = (float)(epipole1_64d.x);
2218 epipole1->y = (float)(epipole1_64d.y);
2219 epipole1->z = (float)(epipole1_64d.z);
2220
2221 epipole2->x = (float)(epipole2_64d.x);
2222 epipole2->y = (float)(epipole2_64d.y);
2223 epipole2->z = (float)(epipole2_64d.z);
2224
2225 /* Convert fundamental matrix */
2226 icvCvt_64d_32f(fundMatr_64d, fundMatr, 9);
2227
2228 return;
2229}
2230
2231/*---------------------------------------------------------------------------------------*/
2232void
2233icvGetQuadsTransformStruct(CvStereoCamera* stereoCamera)
2234{
2235 /* Wrapper for icvGetQuadsTransformNew */
2236
2237
2238 double quad1[4][2];
2239 double quad2[4][2];
2240
2241 icvGetQuadsTransformNew(cvSize(cvRound(stereoCamera->camera[0]->imgSize[0]),
2242 cvRound(stereoCamera->camera[0]->imgSize[1])),
2243 stereoCamera->camera[0]->matrix,
2244 stereoCamera->camera[1]->matrix,
2245 stereoCamera->rotMatrix,
2246 stereoCamera->transVector,
2247 &(stereoCamera->warpSize),
2248 quad1,
2249 quad2,
2250 stereoCamera->fundMatr,
2251 &(stereoCamera->epipole[0]),
2252 &(stereoCamera->epipole[1]));
2253
2254 int i;
2255 for (i = 0; i < 4; i++)
2256 {
2257 stereoCamera->quad[0][i] = cvPoint2D32f(quad1[i][0], quad1[i][1]);
2258 stereoCamera->quad[1][i] = cvPoint2D32f(quad2[i][0], quad2[i][1]);
2259 }
2260
2261 return;
2262}
2263
2264int icvComputeRestStereoParams(CvStereoCamera* stereoparams);
2265
2266/*---------------------------------------------------------------------------------------*/
2267void
2268icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera)
2269{
2270 /* For given intrinsic and extrinsic parameters computes rest parameters
2271 ** such as fundamental matrix. warping coeffs, epipoles, ...
2272 */
2273
2274
2275 /* compute rotate matrix and translate vector */
2276 double rotMatr1[9];
2277 double rotMatr2[9];
2278
2279 double transVect1[3];
2280 double transVect2[3];
2281
2282 double convRotMatr[9];
2283 double convTransVect[3];
2284
2285 /* fill matrices */
2286 icvCvt_32f_64d(stereoCamera->camera[0]->rotMatr, rotMatr1, 9);
2287 icvCvt_32f_64d(stereoCamera->camera[1]->rotMatr, rotMatr2, 9);
2288
2289 icvCvt_32f_64d(stereoCamera->camera[0]->transVect, transVect1, 3);
2290 icvCvt_32f_64d(stereoCamera->camera[1]->transVect, transVect2, 3);
2291
2293 rotMatr1, transVect1, rotMatr2, transVect2, convRotMatr, convTransVect);
2294
2295 /* copy to stereo camera params */
2296 icvCvt_64d_32f(convRotMatr, stereoCamera->rotMatrix, 9);
2297 icvCvt_64d_32f(convTransVect, stereoCamera->transVector, 3);
2298
2299
2300 icvGetQuadsTransformStruct(stereoCamera);
2301 icvComputeRestStereoParams(stereoCamera);
2302}
2303
2305 CvPoint2D64d point1,
2306 CvPoint2D64d point2,
2307 CvPoint2D64d* midPoint);
2308
2309void icvProjectPointToDirect(CvPoint2D64d point, CvVect64d lineCoeff, CvPoint2D64d* projectPoint);
2310
2311/* Get normal direct to direct in line */
2312void
2314{
2315 normDirect[0] = direct[1];
2316 normDirect[1] = -direct[0];
2317 normDirect[2] = -(normDirect[0] * point.x + normDirect[1] * point.y);
2318 return;
2319}
2320
2321/*---------------------------------------------------------------------------------------*/
2322
2323/* Get cut line for one image */
2324void
2326 CvVect64d areaLineCoef2,
2327 CvPoint2D64d epipole,
2328 CvSize imageSize,
2329 CvPoint2D64d* point11,
2330 CvPoint2D64d* point12,
2331 CvPoint2D64d* point21,
2332 CvPoint2D64d* point22,
2333 int* result)
2334{
2335 /* Compute nearest cut line to epipole */
2336 /* Get corners inside sector */
2337 /* Collect all candidate point */
2338
2339 CvPoint2D64d candPoints[8];
2340 CvPoint2D64d midPoint = {0, 0};
2341 int numPoints = 0;
2342 int res;
2343 int i;
2344
2345 double cutLine1[3];
2346 double cutLine2[3];
2347
2348 /* Find middle line of sector */
2349 double midLine[3] = {0, 0, 0};
2350
2351
2352 /* Different way */
2353 CvPoint2D64d pointOnLine1;
2354 pointOnLine1.x = pointOnLine1.y = 0;
2355 CvPoint2D64d pointOnLine2;
2356 pointOnLine2.x = pointOnLine2.y = 0;
2357
2358 CvPoint2D64d start1, end1;
2359
2361 imageSize, areaLineCoef1[0], areaLineCoef1[1], areaLineCoef1[2], &start1, &end1, &res);
2362 if (res > 0)
2363 {
2364 pointOnLine1 = start1;
2365 }
2366
2368 imageSize, areaLineCoef2[0], areaLineCoef2[1], areaLineCoef2[2], &start1, &end1, &res);
2369 if (res > 0)
2370 {
2371 pointOnLine2 = start1;
2372 }
2373
2374 icvGetMiddleAnglePoint(epipole, pointOnLine1, pointOnLine2, &midPoint);
2375
2376 icvGetCoefForPiece(epipole, midPoint, &midLine[0], &midLine[1], &midLine[2], &res);
2377
2378 /* Test corner points */
2379 CvPoint2D64d cornerPoint;
2380 CvPoint2D64d tmpPoints[2];
2381
2382 cornerPoint.x = 0;
2383 cornerPoint.y = 0;
2384 icvTestPoint(cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
2385 if (res == 1)
2386 {
2387 /* Add point */
2388 candPoints[numPoints] = cornerPoint;
2389 numPoints++;
2390 }
2391
2392 cornerPoint.x = imageSize.width;
2393 cornerPoint.y = 0;
2394 icvTestPoint(cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
2395 if (res == 1)
2396 {
2397 /* Add point */
2398 candPoints[numPoints] = cornerPoint;
2399 numPoints++;
2400 }
2401
2402 cornerPoint.x = imageSize.width;
2403 cornerPoint.y = imageSize.height;
2404 icvTestPoint(cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
2405 if (res == 1)
2406 {
2407 /* Add point */
2408 candPoints[numPoints] = cornerPoint;
2409 numPoints++;
2410 }
2411
2412 cornerPoint.x = 0;
2413 cornerPoint.y = imageSize.height;
2414 icvTestPoint(cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
2415 if (res == 1)
2416 {
2417 /* Add point */
2418 candPoints[numPoints] = cornerPoint;
2419 numPoints++;
2420 }
2421
2422 /* Find cross line 1 with image border */
2423 icvGetCrossRectDirect(imageSize,
2424 areaLineCoef1[0],
2425 areaLineCoef1[1],
2426 areaLineCoef1[2],
2427 &tmpPoints[0],
2428 &tmpPoints[1],
2429 &res);
2430 for (i = 0; i < res; i++)
2431 {
2432 candPoints[numPoints++] = tmpPoints[i];
2433 }
2434
2435 /* Find cross line 2 with image border */
2436 icvGetCrossRectDirect(imageSize,
2437 areaLineCoef2[0],
2438 areaLineCoef2[1],
2439 areaLineCoef2[2],
2440 &tmpPoints[0],
2441 &tmpPoints[1],
2442 &res);
2443
2444 for (i = 0; i < res; i++)
2445 {
2446 candPoints[numPoints++] = tmpPoints[i];
2447 }
2448
2449 if (numPoints < 2)
2450 {
2451 *result = 0;
2452 return; /* Error. Not enough points */
2453 }
2454 /* Project all points to middle line and get max and min */
2455
2456 CvPoint2D64d projPoint;
2457 CvPoint2D64d minPoint;
2458 minPoint.x = minPoint.y = FLT_MAX;
2459 CvPoint2D64d maxPoint;
2460 maxPoint.x = maxPoint.y = -FLT_MAX;
2461
2462
2463 double dist;
2464 double maxDist = 0;
2465 double minDist = 10000000;
2466
2467
2468 for (i = 0; i < numPoints; i++)
2469 {
2470 icvProjectPointToDirect(candPoints[i], midLine, &projPoint);
2471 icvGetPieceLength(epipole, projPoint, &dist);
2472 if (dist < minDist)
2473 {
2474 minDist = dist;
2475 minPoint = projPoint;
2476 }
2477
2478 if (dist > maxDist)
2479 {
2480 maxDist = dist;
2481 maxPoint = projPoint;
2482 }
2483 }
2484
2485 /* We know maximum and minimum points. Now we can compute cut lines */
2486
2487 icvGetNormalDirect(midLine, minPoint, cutLine1);
2488 icvGetNormalDirect(midLine, maxPoint, cutLine2);
2489
2490 /* Test for begin of line. */
2491 CvPoint2D64d tmpPoint2;
2492
2493 /* Get cross with */
2494 icvGetCrossDirectDirect(areaLineCoef1, cutLine1, point11, &res);
2495 icvGetCrossDirectDirect(areaLineCoef2, cutLine1, point12, &res);
2496
2497 icvGetCrossDirectDirect(areaLineCoef1, cutLine2, point21, &res);
2498 icvGetCrossDirectDirect(areaLineCoef2, cutLine2, point22, &res);
2499
2500 if (epipole.x > imageSize.width * 0.5)
2501 {
2502 /* Need to change points */
2503 tmpPoint2 = *point11;
2504 *point11 = *point21;
2505 *point21 = tmpPoint2;
2506
2507 tmpPoint2 = *point12;
2508 *point12 = *point22;
2509 *point22 = tmpPoint2;
2510 }
2511
2512 return;
2513}
2514
2515/*---------------------------------------------------------------------------------------*/
2516/* Get middle angle */
2517void
2519 CvPoint2D64d point1,
2520 CvPoint2D64d point2,
2521 CvPoint2D64d* midPoint)
2522{
2523 /* !!! May be need to return error */
2524
2525 double dist1;
2526 double dist2;
2527 icvGetPieceLength(basePoint, point1, &dist1);
2528 icvGetPieceLength(basePoint, point2, &dist2);
2529 CvPoint2D64d pointNew1;
2530 CvPoint2D64d pointNew2;
2531 double alpha = dist2 / dist1;
2532
2533 pointNew1.x = basePoint.x + (1.0 / alpha) * (point2.x - basePoint.x);
2534 pointNew1.y = basePoint.y + (1.0 / alpha) * (point2.y - basePoint.y);
2535
2536 pointNew2.x = basePoint.x + alpha * (point1.x - basePoint.x);
2537 pointNew2.y = basePoint.y + alpha * (point1.y - basePoint.y);
2538
2539 int res;
2540 icvGetCrossPiecePiece(point1, point2, pointNew1, pointNew2, midPoint, &res);
2541
2542 return;
2543}
2544
2545/*---------------------------------------------------------------------------------------*/
2546
2547
2548/*---------------------------------------------------------------------------------------*/
2549CV_IMPL double
2551{
2552 return (point1.x - basePoint.x) * (point2.y - basePoint.y) -
2553 (point2.x - basePoint.x) * (point1.y - basePoint.y);
2554}
2555
2556/*---------------------------------------------------------------------------------------*/
2557/* Test for point in sector */
2558/* Return 0 - point not inside sector */
2559/* Return 1 - point inside sector */
/* Sector membership test.  NOTE(review): the line with the function name and
   the first parameter (the test point, referenced as `testPoint` below) is
   missing from this listing.  Projects the test point onto the two boundary
   lines and compares cross-product signs to decide whether the point lies
   between the lines as seen from basePoint.  *result: 1 inside, 0 outside. */
void
    CvVect64d line1,
    CvVect64d line2,
    CvPoint2D64d basePoint,
    int* result)
{
    CvPoint2D64d point1, point2;

    /* Feet of the perpendiculars from the test point onto each line. */
    icvProjectPointToDirect(testPoint, line1, &point1);
    icvProjectPointToDirect(testPoint, line2, &point2);

    /* Orientation signs taken around basePoint. */
    double sign1 = icvGetVect(basePoint, point1, point2);
    double sign2 = icvGetVect(basePoint, point1, testPoint);
    if (sign1 * sign2 > 0)
    {
        /* Correct for first line */
        sign1 = -sign1;
        sign2 = icvGetVect(basePoint, point2, testPoint);
        if (sign1 * sign2 > 0)
        {
            /* Correct for both lines */
            *result = 1;
        }
        else
        {
            *result = 0;
        }
    }
    else
    {
        *result = 0;
    }

    return;
}
2596
2597/*---------------------------------------------------------------------------------------*/
2598/* Project point to line */
/* Orthogonal projection of `point` onto the line a*x + b*y + c = 0, where
   lineCoeff = {a, b, c}.  NOTE(review): the signature line (the function
   called icvProjectPointToDirect at the call sites below, presumably) is
   missing from this listing. */
void
{
    double a = lineCoeff[0];
    double b = lineCoeff[1];

    /* Reciprocal of the squared normal length; assumes (a, b) != (0, 0). */
    double det = 1.0 / (a * a + b * b);
    double delta = a * point.y - b * point.x;

    projectPoint->x = (-a * lineCoeff[2] - b * delta) * det;
    projectPoint->y = (-b * lineCoeff[2] + a * delta) * det;

    return;
}
2613
2614/*---------------------------------------------------------------------------------------*/
2615/* Get distance from point to direction */
/* Euclidean distance from `point` to the line lineCoef = {a, b, c}, computed
   as the distance to the point's orthogonal projection onto that line.
   NOTE(review): the line carrying the function name and parameter list is
   missing from this listing. */
void
{
    CvPoint2D64d tmpPoint;
    icvProjectPointToDirect(point, lineCoef, &tmpPoint);
    double dx = point.x - tmpPoint.x;
    double dy = point.y - tmpPoint.y;
    *dist = sqrt(dx * dx + dy * dy);
    return;
}
2626
2627/*---------------------------------------------------------------------------------------*/
2628
2629CV_IMPL IplImage*
2630icvCreateIsometricImage(IplImage* src, IplImage* dst, int desired_depth, int desired_num_channels)
2631{
2632 CvSize src_size;
2633 src_size.width = src->width;
2634 src_size.height = src->height;
2635
2636 CvSize dst_size = src_size;
2637
2638 if (dst)
2639 {
2640 dst_size.width = dst->width;
2641 dst_size.height = dst->height;
2642 }
2643
2644 if (!dst || dst->depth != desired_depth || dst->nChannels != desired_num_channels ||
2645 dst_size.width != src_size.width || dst_size.height != src_size.height)
2646 {
2647 cvReleaseImage(&dst);
2648 dst = cvCreateImage(src_size, desired_depth, desired_num_channels);
2649 CvRect rect = cvRect(0, 0, src_size.width, src_size.height);
2650 cvSetImageROI(dst, rect);
2651 }
2652
2653 return dst;
2654}
2655
2656static int
2657icvCvt_32f_64d(float* src, double* dst, int size)
2658{
2659 int t;
2660
2661 if (!src || !dst)
2662 {
2663 return CV_NULLPTR_ERR;
2664 }
2665 if (size <= 0)
2666 {
2667 return CV_BADRANGE_ERR;
2668 }
2669
2670 for (t = 0; t < size; t++)
2671 {
2672 dst[t] = (double)(src[t]);
2673 }
2674
2675 return CV_OK;
2676}
2677
2678/*======================================================================================*/
2679/* Type conversion double -> float */
2680static int
2681icvCvt_64d_32f(double* src, float* dst, int size)
2682{
2683 int t;
2684
2685 if (!src || !dst)
2686 {
2687 return CV_NULLPTR_ERR;
2688 }
2689 if (size <= 0)
2690 {
2691 return CV_BADRANGE_ERR;
2692 }
2693
2694 for (t = 0; t < size; t++)
2695 {
2696 dst[t] = (float)(src[t]);
2697 }
2698
2699 return CV_OK;
2700}
2701
2702CV_INLINE CvSize
2703cvGetMatSize(const CvMat* mat)
2704{
2705 CvSize size;
2706 size.width = mat->cols;
2707 size.height = mat->rows;
2708 return size;
2709}
2710
2711/*----------------------------------------------------------------------------------*/
2712
2713#define __BEGIN__ __CV_BEGIN__
2714#define __END__ __CV_END__
2715
2716/*--------------------------------------------------------------------------------------*/
2717
/* Fill two CV_32FC1 maps with per-pixel coordinates produced by the 3x3
   homography `c` (which must be normalized so that c[2][2] == 1): for each
   destination pixel (j, i) the maps receive the mapped source position,
   suitable for remap-style warping.  Both maps must be CV_32FC1 and the
   same size. */
CV_IMPL void
cvComputePerspectiveMap(const double c[3][3], CvArr* rectMapX, CvArr* rectMapY)
{
    CV_FUNCNAME("cvComputePerspectiveMap");

    __BEGIN__;

    CvSize size;
    CvMat stubx, *mapx = (CvMat*)rectMapX;
    CvMat stuby, *mapy = (CvMat*)rectMapY;
    int i, j;

    /* CV_CALL jumps to the __END__ label on failure. */
    CV_CALL(mapx = cvGetMat(mapx, &stubx));
    CV_CALL(mapy = cvGetMat(mapy, &stuby));

    if (CV_MAT_TYPE(mapx->type) != CV_32FC1 || CV_MAT_TYPE(mapy->type) != CV_32FC1)
    {
        CV_ERROR(CV_StsUnsupportedFormat, "");
    }

    size = cvGetMatSize(mapx);
    /* Homography expected normalized (lower-right element equal to 1). */
    assert(fabs(c[2][2] - 1.) < FLT_EPSILON);

    for (i = 0; i < size.height; i++)
    {
        float* mx = (float*)(mapx->data.ptr + mapx->step * i);
        float* my = (float*)(mapy->data.ptr + mapy->step * i);

        for (j = 0; j < size.width; j++)
        {
            /* Perspective division: w is the reciprocal homogeneous factor. */
            double w = 1. / (c[2][0] * j + c[2][1] * i + 1.);
            double x = (c[0][0] * j + c[0][1] * i + c[0][2]) * w;
            double y = (c[1][0] * j + c[1][1] * i + c[1][2]) * w;

            mx[j] = (float)x;
            my[j] = (float)y;
        }
    }

    __END__;
}
2759
2760/*--------------------------------------------------------------------------------------*/
2761
/* Compute the 3x3 perspective-transform coefficients that map the image
   rectangle (0,0)-(size.width,size.height) onto `quad`, writing them to
   `matrix`, and optionally fill `rectMap` (CV_32FC2, same size) with the
   per-pixel mapped coordinates.  NOTE(review): the line carrying the
   function name and the leading CvSize `size` parameter is missing from
   this listing. */
CV_IMPL void
    const CvPoint2D32f quad[4],
    double matrix[3][3],
    CvArr* rectMap)
{
    /* Computes Perspective Transform coeffs and map if need
       for given image size and given result quad */
    CV_FUNCNAME("cvInitPerspectiveTransform");

    __BEGIN__;

    double A[64];        /* 8x8 system: two equations per point correspondence */
    double b[8];         /* right-hand side: target coordinates */
    double c[8];         /* solved coefficients h00..h21 (h22 fixed to 1) */
    CvPoint2D32f pt[4];
    CvMat mapstub, *map = (CvMat*)rectMap;
    int i, j;

    if (map)
    {
        CV_CALL(map = cvGetMat(map, &mapstub));

        if (CV_MAT_TYPE(map->type) != CV_32FC2)
        {
            CV_ERROR(CV_StsUnsupportedFormat, "");
        }

        if (map->width != size.width || map->height != size.height)
        {
            CV_ERROR(CV_StsUnmatchedSizes, "");
        }
    }

    /* Source quad: the four corners of the image rectangle. */
    pt[0] = cvPoint2D32f(0, 0);
    pt[1] = cvPoint2D32f(size.width, 0);
    pt[2] = cvPoint2D32f(size.width, size.height);
    pt[3] = cvPoint2D32f(0, size.height);

    for (i = 0; i < 4; i++)
    {
        /* Mapping direction: rectangle corners (x, y) -> quad corners (X, Y).
           The disabled branch is the inverse correspondence. */
#if 0
        double x = quad[i].x;
        double y = quad[i].y;
        double X = pt[i].x;
        double Y = pt[i].y;
#else
        double x = pt[i].x;
        double y = pt[i].y;
        double X = quad[i].x;
        double Y = quad[i].y;
#endif
        double* a = A + i * 16;

        /* Row for the X equation of this correspondence. */
        a[0] = x;
        a[1] = y;
        a[2] = 1;
        a[3] = 0;
        a[4] = 0;
        a[5] = 0;
        a[6] = -X * x;
        a[7] = -X * y;

        a += 8;

        /* Row for the Y equation. */
        a[0] = 0;
        a[1] = 0;
        a[2] = 0;
        a[3] = x;
        a[4] = y;
        a[5] = 1;
        a[6] = -Y * x;
        a[7] = -Y * y;

        b[i * 2] = X;
        b[i * 2 + 1] = Y;
    }

    {
        /* Solve A * c = b via the pseudo-inverse. */
        double invA[64];
        CvMat matA = cvMat(8, 8, CV_64F, A);
        CvMat matInvA = cvMat(8, 8, CV_64F, invA);
        CvMat matB = cvMat(8, 1, CV_64F, b);
        CvMat matX = cvMat(8, 1, CV_64F, c);

        CV_CALL(cvPseudoInverse(&matA, &matInvA));
        CV_CALL(cvMatMulAdd(&matInvA, &matB, 0, &matX));
    }

    /* Repack the 8 solved coefficients into the 3x3 homography. */
    matrix[0][0] = c[0];
    matrix[0][1] = c[1];
    matrix[0][2] = c[2];
    matrix[1][0] = c[3];
    matrix[1][1] = c[4];
    matrix[1][2] = c[5];
    matrix[2][0] = c[6];
    matrix[2][1] = c[7];
    matrix[2][2] = 1.0;

    if (map)
    {
        /* Optionally rasterize the transform into a per-pixel lookup map. */
        for (i = 0; i < size.height; i++)
        {
            CvPoint2D32f* maprow = (CvPoint2D32f*)(map->data.ptr + map->step * i);
            for (j = 0; j < size.width; j++)
            {
                double w = 1. / (c[6] * j + c[7] * i + 1.);
                double x = (c[0] * j + c[1] * i + c[2]) * w;
                double y = (c[3] * j + c[4] * i + c[5]) * w;

                maprow[j].x = (float)x;
                maprow[j].y = (float)y;
            }
        }
    }

    __END__;

    return;
}
2882
2883/*-----------------------------------------------------------------------*/
2884/* Compute projected infinite point for second image if first image point is known */
/* Given an image point in camera 1, project the corresponding point at
   infinity into camera 2.  NOTE(review): the line carrying the function name
   and the leading rotation-matrix parameter (`rotMatr`, used below) is
   missing from this listing. */
void
    CvMatr64d camMatr1,
    CvMatr64d camMatr2,
    CvPoint2D32f point1,
    CvPoint2D32f* point2)
{
    /* Back-project point1 to a viewing ray in camera-1 coordinates. */
    double invMatr1[9];
    icvInvertMatrix_64d(camMatr1, 3, invMatr1);
    double P1[3];
    double p1[3];
    p1[0] = (double)(point1.x);
    p1[1] = (double)(point1.y);
    p1[2] = 1;

    icvMulMatrix_64d(invMatr1, 3, 3, p1, 1, 3, P1);

    /* Transpose of the rotation = its inverse (rotation camera1 -> camera2). */
    double invR[9];
    icvTransposeMatrix_64d(rotMatr, 3, 3, invR);

    /* Change system 1 to system 2 */
    double P2[3];
    icvMulMatrix_64d(invR, 3, 3, P1, 1, 3, P2);

    /* Now we can project this point to image 2 */
    double projP[3];

    icvMulMatrix_64d(camMatr2, 3, 3, P2, 1, 3, projP);

    /* Perspective division back to pixel coordinates. */
    point2->x = (float)(projP[0] / projP[2]);
    point2->y = (float)(projP[1] / projP[2]);

    return;
}
2919
2920/*-----------------------------------------------------------------------*/
2921/* Compute projected infinite point for second image if first image point is known */
/* Mirror of the routine above: given an image point in camera 2, project the
   corresponding point at infinity into camera 1.  NOTE(review): the line with
   the function name and the leading rotation-matrix parameter (`rotMatr`,
   used below) is missing from this listing. */
void
    CvMatr64d camMatr1,
    CvMatr64d camMatr2,
    CvPoint2D32f* point1,
    CvPoint2D32f point2)
{
    /* Back-project point2 to a viewing ray in camera-2 coordinates. */
    double invMatr2[9];
    icvInvertMatrix_64d(camMatr2, 3, invMatr2);
    double P2[3];
    double p2[3];
    p2[0] = (double)(point2.x);
    p2[1] = (double)(point2.y);
    p2[2] = 1;

    icvMulMatrix_64d(invMatr2, 3, 3, p2, 1, 3, P2);

    /* Change system 1 to system 2 */

    /* Rotate the ray into camera-1 coordinates (no transpose: opposite
       direction to the sibling routine above). */
    double P1[3];
    icvMulMatrix_64d(rotMatr, 3, 3, P2, 1, 3, P1);

    /* Now we can project this point to image 2 */
    double projP[3];

    icvMulMatrix_64d(camMatr1, 3, 3, P1, 1, 3, projP);

    /* Perspective division back to pixel coordinates. */
    point1->x = (float)(projP[0] / projP[2]);
    point1->y = (float)(projP[1] / projP[2]);

    return;
}
2954
2955/* Simpler version of the previous function */
/* Project `point_count` 3D points through the given extrinsics, camera
   matrix and 4-element distortion vector by wrapping the raw buffers in
   CvMat headers and delegating to cvProjectPoints2 (no Jacobians requested).
   NOTE(review): the line carrying the function name (cvProjectPointsSimple,
   per call sites below) and the leading `point_count` parameter is missing
   from this listing. */
void
    CvPoint3D64f* _object_points,
    double* _rotation_matrix,
    double* _translation_vector,
    double* _camera_matrix,
    double* _distortion,
    CvPoint2D64f* _image_points)
{
    /* Non-owning matrix headers over the caller's buffers. */
    CvMat object_points = cvMat(point_count, 1, CV_64FC3, _object_points);
    CvMat image_points = cvMat(point_count, 1, CV_64FC2, _image_points);
    CvMat rotation_matrix = cvMat(3, 3, CV_64FC1, _rotation_matrix);
    CvMat translation_vector = cvMat(3, 1, CV_64FC1, _translation_vector);
    CvMat camera_matrix = cvMat(3, 3, CV_64FC1, _camera_matrix);
    CvMat dist_coeffs = cvMat(4, 1, CV_64FC1, _distortion);

    /* Trailing zeros: no dp/dr, dp/dt, dp/df, dp/dc, dp/dk Jacobians and
       default aspect-ratio handling. */
    cvProjectPoints2(&object_points,
                     &rotation_matrix,
                     &translation_vector,
                     &camera_matrix,
                     &dist_coeffs,
                     &image_points,
                     0,
                     0,
                     0,
                     0,
                     0,
                     0);
}
2985
2986/* Select best R and t for given cameras, points, ... */
2987/* For both cameras */
2988static int
2989icvSelectBestRt(int numImages,
2990 int* numPoints,
2991 CvPoint2D32f* imagePoints1,
2992 CvPoint2D32f* imagePoints2,
2993 CvPoint3D32f* objectPoints,
2994
2995 CvMatr32f cameraMatrix1,
2996 CvVect32f distortion1,
2997 CvMatr32f rotMatrs1,
2998 CvVect32f transVects1,
2999
3000 CvMatr32f cameraMatrix2,
3001 CvVect32f distortion2,
3002 CvMatr32f rotMatrs2,
3003 CvVect32f transVects2,
3004
3005 CvMatr32f bestRotMatr,
3006 CvVect32f bestTransVect)
3007{
3008
3009 /* Need to convert input data 32 -> 64 */
3010 CvPoint3D64d* objectPoints_64d;
3011
3012 double* rotMatrs1_64d;
3013 double* rotMatrs2_64d;
3014
3015 double* transVects1_64d;
3016 double* transVects2_64d;
3017
3018 double cameraMatrix1_64d[9];
3019 double cameraMatrix2_64d[9];
3020
3021 double distortion1_64d[4];
3022 double distortion2_64d[4];
3023
3024 /* allocate memory for 64d data */
3025 int totalNum = 0;
3026
3027 for (int i = 0; i < numImages; i++)
3028 {
3029 totalNum += numPoints[i];
3030 }
3031
3032 objectPoints_64d = (CvPoint3D64d*)calloc(totalNum, sizeof(CvPoint3D64d));
3033
3034 rotMatrs1_64d = (double*)calloc(numImages, sizeof(double) * 9);
3035 rotMatrs2_64d = (double*)calloc(numImages, sizeof(double) * 9);
3036
3037 transVects1_64d = (double*)calloc(numImages, sizeof(double) * 3);
3038 transVects2_64d = (double*)calloc(numImages, sizeof(double) * 3);
3039
3040 /* Convert input data to 64d */
3041
3042 icvCvt_32f_64d((float*)objectPoints, (double*)objectPoints_64d, totalNum * 3);
3043
3044 icvCvt_32f_64d(rotMatrs1, rotMatrs1_64d, numImages * 9);
3045 icvCvt_32f_64d(rotMatrs2, rotMatrs2_64d, numImages * 9);
3046
3047 icvCvt_32f_64d(transVects1, transVects1_64d, numImages * 3);
3048 icvCvt_32f_64d(transVects2, transVects2_64d, numImages * 3);
3049
3050 /* Convert to arrays */
3051 icvCvt_32f_64d(cameraMatrix1, cameraMatrix1_64d, 9);
3052 icvCvt_32f_64d(cameraMatrix2, cameraMatrix2_64d, 9);
3053
3054 icvCvt_32f_64d(distortion1, distortion1_64d, 4);
3055 icvCvt_32f_64d(distortion2, distortion2_64d, 4);
3056
3057
3058 /* for each R and t compute error for image pair */
3059 float* errors;
3060
3061 errors = (float*)calloc(numImages * numImages, sizeof(float));
3062 if (errors == 0)
3063 {
3064 return CV_OUTOFMEM_ERR;
3065 }
3066
3067 int currImagePair;
3068 int currRt;
3069 for (currRt = 0; currRt < numImages; currRt++)
3070 {
3071 int begPoint = 0;
3072 for (currImagePair = 0; currImagePair < numImages; currImagePair++)
3073 {
3074 /* For current R,t R,t compute relative position of cameras */
3075
3076 double convRotMatr[9];
3077 double convTransVect[3];
3078
3079 icvCreateConvertMatrVect(rotMatrs1_64d + currRt * 9,
3080 transVects1_64d + currRt * 3,
3081 rotMatrs2_64d + currRt * 9,
3082 transVects2_64d + currRt * 3,
3083 convRotMatr,
3084 convTransVect);
3085
3086 /* Project points using relative position of cameras */
3087
3088 double convRotMatr2[9];
3089 double convTransVect2[3];
3090
3091 convRotMatr2[0] = 1;
3092 convRotMatr2[1] = 0;
3093 convRotMatr2[2] = 0;
3094
3095 convRotMatr2[3] = 0;
3096 convRotMatr2[4] = 1;
3097 convRotMatr2[5] = 0;
3098
3099 convRotMatr2[6] = 0;
3100 convRotMatr2[7] = 0;
3101 convRotMatr2[8] = 1;
3102
3103 convTransVect2[0] = 0;
3104 convTransVect2[1] = 0;
3105 convTransVect2[2] = 0;
3106
3107 /* Compute error for given pair and Rt */
3108 /* We must project points to image and compute error */
3109
3110 CvPoint2D64d* projImagePoints1;
3111 CvPoint2D64d* projImagePoints2;
3112
3113 CvPoint3D64d* points1;
3114 CvPoint3D64d* points2;
3115
3116 int numberPnt;
3117 numberPnt = numPoints[currImagePair];
3118 projImagePoints1 = (CvPoint2D64d*)calloc(numberPnt, sizeof(CvPoint2D64d));
3119 projImagePoints2 = (CvPoint2D64d*)calloc(numberPnt, sizeof(CvPoint2D64d));
3120
3121 points1 = (CvPoint3D64d*)calloc(numberPnt, sizeof(CvPoint3D64d));
3122 points2 = (CvPoint3D64d*)calloc(numberPnt, sizeof(CvPoint3D64d));
3123
3124 /* Transform object points to first camera position */
3125 for (int i = 0; i < numberPnt; i++)
3126 {
3127 /* Create second camera point */
3128 CvPoint3D64d tmpPoint;
3129 tmpPoint.x = (double)(objectPoints[i].x);
3130 tmpPoint.y = (double)(objectPoints[i].y);
3131 tmpPoint.z = (double)(objectPoints[i].z);
3132
3133 icvConvertPointSystem(tmpPoint,
3134 points2 + i,
3135 rotMatrs2_64d + currImagePair * 9,
3136 transVects2_64d + currImagePair * 3);
3137
3138 /* Create first camera point using R, t */
3139 icvConvertPointSystem(points2[i], points1 + i, convRotMatr, convTransVect);
3140
3141 CvPoint3D64d tmpPoint2 = {0, 0, 0};
3142 icvConvertPointSystem(tmpPoint,
3143 &tmpPoint2,
3144 rotMatrs1_64d + currImagePair * 9,
3145 transVects1_64d + currImagePair * 3);
3146 }
3147
3148 /* Project with no translate and no rotation */
3149
3150 cvProjectPointsSimple(numPoints[currImagePair],
3151 points1,
3152 convRotMatr2,
3153 convTransVect2,
3154 cameraMatrix1_64d,
3155 distortion1_64d,
3156 projImagePoints1);
3157
3158 cvProjectPointsSimple(numPoints[currImagePair],
3159 points2,
3160 convRotMatr2,
3161 convTransVect2,
3162 cameraMatrix2_64d,
3163 distortion2_64d,
3164 projImagePoints2);
3165
3166 /* points are projected. Compute error */
3167
3168 int currPoint;
3169 double err1 = 0;
3170 double err2 = 0;
3171 double err;
3172 for (currPoint = 0; currPoint < numberPnt; currPoint++)
3173 {
3174 double len1, len2;
3175 double dx1, dy1;
3176 dx1 = imagePoints1[begPoint + currPoint].x - projImagePoints1[currPoint].x;
3177 dy1 = imagePoints1[begPoint + currPoint].y - projImagePoints1[currPoint].y;
3178 len1 = sqrt(dx1 * dx1 + dy1 * dy1);
3179 err1 += len1;
3180
3181 double dx2, dy2;
3182 dx2 = imagePoints2[begPoint + currPoint].x - projImagePoints2[currPoint].x;
3183 dy2 = imagePoints2[begPoint + currPoint].y - projImagePoints2[currPoint].y;
3184 len2 = sqrt(dx2 * dx2 + dy2 * dy2);
3185 err2 += len2;
3186 }
3187
3188 err1 /= (float)(numberPnt);
3189 err2 /= (float)(numberPnt);
3190
3191 err = (err1 + err2) * 0.5;
3192 begPoint += numberPnt;
3193
3194 /* Set this error to */
3195 errors[numImages * currImagePair + currRt] = (float)err;
3196
3197 free(points1);
3198 free(points2);
3199 free(projImagePoints1);
3200 free(projImagePoints2);
3201 }
3202 }
3203
3204 /* Just select R and t with minimal average error */
3205
3206 int bestnumRt = 0;
3207 float minError = 0; /* Just for no warnings. Uses 'first' flag. */
3208 int first = 1;
3209 for (currRt = 0; currRt < numImages; currRt++)
3210 {
3211 float avErr = 0;
3212 for (currImagePair = 0; currImagePair < numImages; currImagePair++)
3213 {
3214 avErr += errors[numImages * currImagePair + currRt];
3215 }
3216 avErr /= (float)(numImages);
3217
3218 if (first)
3219 {
3220 bestnumRt = 0;
3221 minError = avErr;
3222 first = 0;
3223 }
3224 else
3225 {
3226 if (avErr < minError)
3227 {
3228 bestnumRt = currRt;
3229 minError = avErr;
3230 }
3231 }
3232 }
3233
3234 double bestRotMatr_64d[9];
3235 double bestTransVect_64d[3];
3236
3237 icvCreateConvertMatrVect(rotMatrs1_64d + bestnumRt * 9,
3238 transVects1_64d + bestnumRt * 3,
3239 rotMatrs2_64d + bestnumRt * 9,
3240 transVects2_64d + bestnumRt * 3,
3241 bestRotMatr_64d,
3242 bestTransVect_64d);
3243
3244 icvCvt_64d_32f(bestRotMatr_64d, bestRotMatr, 9);
3245 icvCvt_64d_32f(bestTransVect_64d, bestTransVect, 3);
3246
3247
3248 free(errors);
3249
3250 return CV_OK;
3251}
3252
3253/* ----------------- Stereo calibration functions --------------------- */
3254
3255float
3256icvDefinePointPosition(CvPoint2D32f point1, CvPoint2D32f point2, CvPoint2D32f point)
3257{
3258 float ax = point2.x - point1.x;
3259 float ay = point2.y - point1.y;
3260
3261 float bx = point.x - point1.x;
3262 float by = point.y - point1.y;
3263
3264 return (ax * by - ay * bx);
3265}
3266
3267/* Convert function for stereo warping */
3268int
3269icvConvertWarpCoordinates(double coeffs[3][3],
3270 CvPoint2D32f* cameraPoint,
3271 CvPoint2D32f* warpPoint,
3272 int direction)
3273{
3274 double x, y;
3275 double det;
3276 if (direction == CV_WARP_TO_CAMERA)
3277 {
3278 /* convert from camera image to warped image coordinates */
3279 x = warpPoint->x;
3280 y = warpPoint->y;
3281
3282 det = (coeffs[2][0] * x + coeffs[2][1] * y + coeffs[2][2]);
3283 if (fabs(det) > 1e-8)
3284 {
3285 cameraPoint->x = (float)((coeffs[0][0] * x + coeffs[0][1] * y + coeffs[0][2]) / det);
3286 cameraPoint->y = (float)((coeffs[1][0] * x + coeffs[1][1] * y + coeffs[1][2]) / det);
3287 return CV_OK;
3288 }
3289 }
3290 else if (direction == CV_CAMERA_TO_WARP)
3291 {
3292 /* convert from warped image to camera image coordinates */
3293 x = cameraPoint->x;
3294 y = cameraPoint->y;
3295
3296 det = (coeffs[2][0] * x - coeffs[0][0]) * (coeffs[2][1] * y - coeffs[1][1]) -
3297 (coeffs[2][1] * x - coeffs[0][1]) * (coeffs[2][0] * y - coeffs[1][0]);
3298
3299 if (fabs(det) > 1e-8)
3300 {
3301 warpPoint->x =
3302 (float)(((coeffs[0][2] - coeffs[2][2] * x) * (coeffs[2][1] * y - coeffs[1][1]) -
3303 (coeffs[2][1] * x - coeffs[0][1]) * (coeffs[1][2] - coeffs[2][2] * y)) /
3304 det);
3305 warpPoint->y =
3306 (float)(((coeffs[2][0] * x - coeffs[0][0]) * (coeffs[1][2] - coeffs[2][2] * y) -
3307 (coeffs[0][2] - coeffs[2][2] * x) * (coeffs[2][0] * y - coeffs[1][0])) /
3308 det);
3309 return CV_OK;
3310 }
3311 }
3312
3313 return CV_BADFACTOR_ERR;
3314}
3315
3316/* Compute stereo params using some camera params */
3317/* by Valery Mosyagin. int ComputeRestStereoParams(StereoParams *stereoparams) */
/* Derive the remaining stereo parameters (warp quads, homography coeffs,
   warped-image borders and per-scanline epiline coefficients) from the
   already-computed camera parameters in `stereoparams`.  Always returns
   CV_OK.  NOTE(review): several hyperlinked call-name lines are missing from
   this listing; only their argument lists remain below. */
int
icvComputeRestStereoParams(CvStereoCamera* stereoparams)
{


    icvGetQuadsTransformStruct(stereoparams);

    /* NOTE(review): call-name line missing — computes the warp coefficients
       for camera 0 from its quad, judging by the arguments. */
        stereoparams->warpSize, stereoparams->quad[0], stereoparams->coeffs[0], 0);

    /* NOTE(review): call-name line missing — same computation for camera 1. */
        stereoparams->warpSize, stereoparams->quad[1], stereoparams->coeffs[1], 0);

    /* Create border for warped images: start from the four corners of the
       original (camera-0 sized) image... */
    CvPoint2D32f corns[4];
    corns[0].x = 0;
    corns[0].y = 0;

    corns[1].x = (float)(stereoparams->camera[0]->imgSize[0] - 1);
    corns[1].y = 0;

    corns[2].x = (float)(stereoparams->camera[0]->imgSize[0] - 1);
    corns[2].y = (float)(stereoparams->camera[0]->imgSize[1] - 1);

    corns[3].x = 0;
    corns[3].y = (float)(stereoparams->camera[0]->imgSize[1] - 1);

    /* ...and map each corner into warp coordinates for both cameras. */
    for (int i = 0; i < 4; i++)
    {
        /* For first camera (call-name line missing from this listing). */
        stereoparams->coeffs[0], corns + i, stereoparams->border[0] + i, CV_CAMERA_TO_WARP);

        /* For second camera (call-name line missing from this listing). */
        stereoparams->coeffs[1], corns + i, stereoparams->border[1] + i, CV_CAMERA_TO_WARP);
    }

    /* Test compute: round-trip the warped-image corners back to camera
       coordinates; results are local only (sanity/debug check). */
    {
        CvPoint2D32f warpPoints[4];
        warpPoints[0] = cvPoint2D32f(0, 0);
        warpPoints[1] = cvPoint2D32f(stereoparams->warpSize.width - 1, 0);
        warpPoints[2] =
            cvPoint2D32f(stereoparams->warpSize.width - 1, stereoparams->warpSize.height - 1);
        warpPoints[3] = cvPoint2D32f(0, stereoparams->warpSize.height - 1);

        CvPoint2D32f camPoints1[4];
        CvPoint2D32f camPoints2[4];

        for (int i = 0; i < 4; i++)
        {
            /* (call-name lines missing from this listing) */
            stereoparams->coeffs[0], camPoints1 + i, warpPoints + i, CV_WARP_TO_CAMERA);

            stereoparams->coeffs[1], camPoints2 + i, warpPoints + i, CV_WARP_TO_CAMERA);
        }
    }


    /* Allocate memory for scanlines coeffs: one entry per warped row.
       NOTE(review): allocation result is not checked and any previous
       lineCoeffs buffer is not released here. */
    stereoparams->lineCoeffs =
        (CvStereoLineCoeff*)calloc(stereoparams->warpSize.height, sizeof(CvStereoLineCoeff));

    /* Compute coeffs for epilines */

    icvComputeCoeffForStereo(stereoparams);

    /* all coeffs are known */
    return CV_OK;
}
3391
3392/*-------------------------------------------------------------------------------------------*/
3393
/* Full stereo calibration: calibrate each camera separately over all views,
   pick the best relative (R, t) via icvSelectBestRt, then derive the
   remaining stereo parameters.  Returns CV_NO_ERR.
   NOTE(review): the line with the function name and the leading `numImages`
   parameter is missing from this listing. */
int
    int* nums,
    CvSize imageSize,
    CvPoint2D32f* imagePoints1,
    CvPoint2D32f* imagePoints2,
    CvPoint3D32f* objectPoints,
    CvStereoCamera* stereoparams)
{
    /* Firstly we must calibrate both cameras */
    /* Per-view extrinsics produced by the single-camera calibrations.
       NOTE(review): calloc results are not checked here. */
    float* transVects1;
    float* transVects2;
    float* rotMatrs1;
    float* rotMatrs2;

    transVects1 = (float*)calloc(numImages, sizeof(float) * 3);
    transVects2 = (float*)calloc(numImages, sizeof(float) * 3);

    rotMatrs1 = (float*)calloc(numImages, sizeof(float) * 9);
    rotMatrs2 = (float*)calloc(numImages, sizeof(float) * 9);

    /* Calibrate first camera */
    cvCalibrateCamera(numImages,
                      nums,
                      imageSize,
                      imagePoints1,
                      objectPoints,
                      stereoparams->camera[0]->distortion,
                      stereoparams->camera[0]->matrix,
                      transVects1,
                      rotMatrs1,
                      1);

    /* Calibrate second camera */
    cvCalibrateCamera(numImages,
                      nums,
                      imageSize,
                      imagePoints2,
                      objectPoints,
                      stereoparams->camera[1]->distortion,
                      stereoparams->camera[1]->matrix,
                      transVects2,
                      rotMatrs2,
                      1);

    /* Cameras are calibrated */

    /* Record the common image size on both camera descriptors. */
    stereoparams->camera[0]->imgSize[0] = (float)imageSize.width;
    stereoparams->camera[0]->imgSize[1] = (float)imageSize.height;

    stereoparams->camera[1]->imgSize[0] = (float)imageSize.width;
    stereoparams->camera[1]->imgSize[1] = (float)imageSize.height;

    /* Choose the relative pose that best explains all views. */
    icvSelectBestRt(numImages,
                    nums,
                    imagePoints1,
                    imagePoints2,
                    objectPoints,
                    stereoparams->camera[0]->matrix,
                    stereoparams->camera[0]->distortion,
                    rotMatrs1,
                    transVects1,
                    stereoparams->camera[1]->matrix,
                    stereoparams->camera[1]->distortion,
                    rotMatrs2,
                    transVects2,
                    stereoparams->rotMatrix,
                    stereoparams->transVector);

    /* Free memory */
    free(transVects1);
    free(transVects2);
    free(rotMatrs1);
    free(rotMatrs2);

    /* Derive warp quads, borders and epiline coefficients. */
    icvComputeRestStereoParams(stereoparams);

    return CV_NO_ERR;
}
3475
3476int
3477icvGetCrossPieceVector(CvPoint2D32f p1_start,
3478 CvPoint2D32f p1_end,
3479 CvPoint2D32f v2_start,
3480 CvPoint2D32f v2_end,
3481 CvPoint2D32f* cross)
3482{
3483 double ex1 = p1_start.x;
3484 double ey1 = p1_start.y;
3485 double ex2 = p1_end.x;
3486 double ey2 = p1_end.y;
3487
3488 double px1 = v2_start.x;
3489 double py1 = v2_start.y;
3490 double px2 = v2_end.x;
3491 double py2 = v2_end.y;
3492
3493 double del = (ex1 - ex2) * (py2 - py1) + (ey2 - ey1) * (px2 - px1);
3494 if (del == 0)
3495 {
3496 return -1;
3497 }
3498
3499 double delA = (px1 - ex1) * (py1 - py2) + (ey1 - py1) * (px1 - px2);
3500 //double delB = (ex1-px1)*(ey1-ey2) + (py1-ey1)*(ex1-ex2);
3501
3502 double alpha = delA / del;
3503 //double betta = -delB / del;
3504
3505 if (alpha < 0 || alpha > 1.0)
3506 {
3507 return -1;
3508 }
3509
3510 double delX = (ex1 - ex2) * (py1 * (px1 - px2) - px1 * (py1 - py2)) +
3511 (px1 - px2) * (ex1 * (ey1 - ey2) - ey1 * (ex1 - ex2));
3512
3513 double delY = (ey1 - ey2) * (px1 * (py1 - py2) - py1 * (px1 - px2)) +
3514 (py1 - py2) * (ey1 * (ex1 - ex2) - ex1 * (ey1 - ey2));
3515
3516 cross->x = (float)(delX / del);
3517 cross->y = (float)(-delY / del);
3518 return 1;
3519}
3520
/* Intersect the segment (p1, p2) with the infinite line a*x + b*y + c = 0
   (Cramer's rule).  Writes the crossing point to *cross and returns 1;
   returns -1 when the segment is parallel to the line or the intersection
   lies outside the segment.  NOTE(review): the line carrying the function
   name and the first parameter `p1` is missing from this listing. */
int
    CvPoint2D32f p2,
    float a,
    float b,
    float c,
    CvPoint2D32f* cross)
{
    double del;
    double delX, delY, delA;

    double px1, px2, py1, py2;
    double X, Y, alpha;

    px1 = p1.x;
    py1 = p1.y;

    px2 = p2.x;
    py2 = p2.y;

    /* Zero denominator: segment direction is parallel to the line. */
    del = a * (px2 - px1) + b * (py2 - py1);
    if (del == 0)
    {
        return -1;
    }

    /* Parameter of the intersection along the segment p1 -> p2. */
    delA = -c - a * px1 - b * py1;
    alpha = delA / del;

    if (alpha < 0 || alpha > 1.0)
    {
        return -1; /* no cross */
    }

    delX = b * (py1 * (px1 - px2) - px1 * (py1 - py2)) + c * (px1 - px2);
    delY = a * (px1 * (py1 - py2) - py1 * (px1 - px2)) + c * (py1 - py2);

    X = delX / del;
    Y = delY / del;

    cross->x = (float)X;
    cross->y = (float)Y;

    return 1;
}
3566
3567/* Compute epipoles for fundamental matrix */
3568int
3569cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr, CvPoint3D32f* epipole1, CvPoint3D32f* epipole2)
3570{
3571 /* Decompose fundamental matrix using SVD ( A = U W V') */
3572 CvMat fundMatrC = cvMat(3, 3, CV_MAT32F, fundMatr);
3573
3574 CvMat* matrW = cvCreateMat(3, 3, CV_MAT32F);
3575 CvMat* matrU = cvCreateMat(3, 3, CV_MAT32F);
3576 CvMat* matrV = cvCreateMat(3, 3, CV_MAT32F);
3577
3578 /* From svd we need just last vector of U and V or last row from U' and V' */
3579 /* We get transposed matrices U and V */
3580 cvSVD(&fundMatrC, matrW, matrU, matrV, CV_SVD_V_T | CV_SVD_U_T);
3581
3582 /* Get last row from U' and compute epipole1 */
3583 epipole1->x = matrU->data.fl[6];
3584 epipole1->y = matrU->data.fl[7];
3585 epipole1->z = matrU->data.fl[8];
3586
3587 /* Get last row from V' and compute epipole2 */
3588 epipole2->x = matrV->data.fl[6];
3589 epipole2->y = matrV->data.fl[7];
3590 epipole2->z = matrV->data.fl[8];
3591
3592 cvReleaseMat(&matrW);
3593 cvReleaseMat(&matrU);
3594 cvReleaseMat(&matrV);
3595 return CV_OK;
3596}
3597
/* Compute the fundamental matrix from the essential matrix and the two
   camera matrices: Fund = inv(CM1') * Ess * inv(CM2), then scale so the
   last element equals 1.  NOTE(review): the line carrying the function name
   and the leading essential-matrix parameter (`essMatr`, used below) is
   missing from this listing. */
int
    CvMatr32f fundMatr,
    CvMatr32f cameraMatr1,
    CvMatr32f cameraMatr2)
{
    /* Fund = inv(CM1') * Ess * inv(CM2) */

    CvMat essMatrC = cvMat(3, 3, CV_MAT32F, essMatr);
    CvMat fundMatrC = cvMat(3, 3, CV_MAT32F, fundMatr);
    CvMat cameraMatr1C = cvMat(3, 3, CV_MAT32F, cameraMatr1);
    CvMat cameraMatr2C = cvMat(3, 3, CV_MAT32F, cameraMatr2);

    CvMat* invCM2 = cvCreateMat(3, 3, CV_MAT32F);
    CvMat* tmpMatr = cvCreateMat(3, 3, CV_MAT32F);
    CvMat* invCM1T = cvCreateMat(3, 3, CV_MAT32F);

    /* invCM1T = inv(CM1'); tmpMatr = invCM1T * Ess; fund = tmpMatr * inv(CM2). */
    cvTranspose(&cameraMatr1C, tmpMatr);
    cvInvert(tmpMatr, invCM1T);
    cvmMul(invCM1T, &essMatrC, tmpMatr);
    cvInvert(&cameraMatr2C, invCM2);
    cvmMul(tmpMatr, invCM2, &fundMatrC);

    /* Scale fundamental matrix so its (2,2) element becomes 1.
       NOTE(review): divides by fundMatrC.data.fl[8] without a zero check. */
    double scale;
    scale = 1.0 / fundMatrC.data.fl[8];
    cvConvertScale(&fundMatrC, &fundMatrC, scale);

    cvReleaseMat(&invCM2);
    cvReleaseMat(&tmpMatr);
    cvReleaseMat(&invCM1T);

    return CV_OK;
}
3632
3633/* Compute essential matrix */
3634
/* Build the essential matrix from a relative rotation and translation:
   Ess = [t]x * R, where [t]x is the antisymmetric cross-product matrix of
   the translation vector.  NOTE(review): the entire signature line (the
   parameters `essMatr`, `rotMatr` and `transVect` used below) is missing
   from this listing. */
int
{
    float transMatr[9];

    /* Make antisymmetric matrix from transpose vector */
    transMatr[0] = 0;
    transMatr[1] = -transVect[2];
    transMatr[2] = transVect[1];

    transMatr[3] = transVect[2];
    transMatr[4] = 0;
    transMatr[5] = -transVect[0];

    transMatr[6] = -transVect[1];
    transMatr[7] = transVect[0];
    transMatr[8] = 0;

    /* Ess = [t]x * R */
    icvMulMatrix_32f(transMatr, 3, 3, rotMatr, 3, 3, essMatr);

    return CV_OK;
}
3657
3658// END
3659
/* Finish the accumulation phase: drop the cached undistort/rectify maps and,
   when `calibrate` is set and frames were accepted, run single-camera
   calibration for every camera (and stereo calibration for a two-camera
   rig).  Resets framesAccepted. */
void
CvCalibFilter::Stop(bool calibrate)
{
    int i, j;
    isCalibrated = false;

    // deallocate undistortion maps
    for (i = 0; i < cameraCount; i++)
    {
        cvReleaseMat(&undistMap[i][0]);
        cvReleaseMat(&undistMap[i][1]);
        cvReleaseMat(&rectMap[i][0]);
        cvReleaseMat(&rectMap[i][1]);
    }

    if (calibrate && framesAccepted > 0)
    {
        int n = framesAccepted;
        // scratch buffers: replicated etalon geometry plus per-frame extrinsics
        CvPoint3D32f* buffer = (CvPoint3D32f*)cvAlloc(n * etalonPointCount * sizeof(buffer[0]));
        CvMat mat;
        float* rotMatr = (float*)cvAlloc(n * 9 * sizeof(rotMatr[0]));
        float* transVect = (float*)cvAlloc(n * 3 * sizeof(transVect[0]));
        int* counts = (int*)cvAlloc(n * sizeof(counts[0]));

        // header used below to range-check one whole CvCamera struct at a time
        cvInitMatHeader(&mat, 1, sizeof(CvCamera) / sizeof(float), CV_32FC1, 0);
        memset(cameraParams, 0, cameraCount * sizeof(cameraParams[0]));

        // every frame observes the full etalon; the target is planar (z == 0)
        for (i = 0; i < framesAccepted; i++)
        {
            counts[i] = etalonPointCount;
            for (j = 0; j < etalonPointCount; j++)
                buffer[i * etalonPointCount + j] =
                    cvPoint3D32f(etalonPoints[j].x, etalonPoints[j].y, 0);
        }

        for (i = 0; i < cameraCount; i++)
        {
            cvCalibrateCamera(framesAccepted,
                              counts,
                              imgSize,
                              points[i],
                              buffer,
                              cameraParams[i].distortion,
                              cameraParams[i].matrix,
                              transVect,
                              rotMatr,
                              0);

            cameraParams[i].imgSize[0] = (float)imgSize.width;
            cameraParams[i].imgSize[1] = (float)imgSize.height;

            // cameraParams[i].focalLength[0] = cameraParams[i].matrix[0];
            // cameraParams[i].focalLength[1] = cameraParams[i].matrix[4];

            // cameraParams[i].principalPoint[0] = cameraParams[i].matrix[2];
            // cameraParams[i].principalPoint[1] = cameraParams[i].matrix[5];

            // keep only the extrinsics of the last processed frame
            memcpy(cameraParams[i].rotMatr, rotMatr, 9 * sizeof(rotMatr[0]));
            memcpy(cameraParams[i].transVect, transVect, 3 * sizeof(transVect[0]));

            mat.data.ptr = (uchar*)(cameraParams + i);

            /* check resultant camera parameters: if there are some INF's or NAN's,
               stop and reset results */
            if (!cvCheckArr(&mat, CV_CHECK_RANGE | CV_CHECK_QUIET, -10000, 10000))
            {
                break;
            }
        }


        /* NOTE(review): a statement is missing here in this listing —
           presumably the assignment deciding `isCalibrated` from the loop
           outcome above; confirm against the original source. */

        {
            /* calibrate stereo cameras */
            if (cameraCount == 2)
            {
                stereo.camera[0] = &cameraParams[0];
                stereo.camera[1] = &cameraParams[1];

                /* NOTE(review): the call-name line is missing here; the
                   arguments match the stereo-calibration routine above. */
                    framesAccepted, counts, imgSize, points[0], points[1], buffer, &stereo);
            }
        }

        cvFree(&buffer);
        cvFree(&counts);
        cvFree(&rotMatr);
        cvFree(&transVect);
    }

    framesAccepted = 0;
}
3753
/* Thin overload: forward an IplImage* array to the CvMat** variant below
   (the two layouts are interchangeable for cvGetMat-style access).
   NOTE(review): the line carrying the method name and its parameter `imgs`
   is missing from this listing. */
bool
{
    return FindEtalon((CvMat**)imgs);
}
3759
/* Adapter around cvFindChessboardCorners with adaptive thresholding.  The
   unused void* and CvMemStorage* parameters appear to be kept for an older
   callback interface — confirm against callers.  NOTE(review): the line
   carrying the function name and its first parameter `arr` is missing from
   this listing. */
int
    void*,
    CvMemStorage*,
    CvSize pattern_size,
    CvPoint2D32f* corners,
    int* corner_count)
{
    return cvFindChessboardCorners(
        arr, pattern_size, corners, corner_count, CV_CALIB_CB_ADAPTIVE_THRESH);
}
3771
/* Detect the calibration etalon in one frame per camera: lazily (re)allocate
   the point buffers and gray working images, run the etalon-specific corner
   detector, refine corners to sub-pixel accuracy, and record per-camera
   results in latestPoints/latestCounts (count negated when detection
   failed).  Returns true only if the etalon was found in every camera.
   NOTE(review): the line carrying the method name and its `mats` parameter
   is missing from this listing. */
bool
{
    bool result = true;

    if (!mats || etalonPointCount == 0)
    {
        assert(0);
        result = false;
    }

    if (result)
    {
        /* Detector may report up to twice the nominal point count. */
        int i, tempPointCount0 = etalonPointCount * 2;

        for (i = 0; i < cameraCount; i++)
        {
            /* NOTE(review): sizeof(latestPoints[0]) is the size of a
               pointer, not of CvPoint2D32f — they happen to coincide on
               common 64-bit targets; confirm against the original source. */
            if (!latestPoints[i])
                latestPoints[i] =
                    (CvPoint2D32f*)cvAlloc(tempPointCount0 * 2 * sizeof(latestPoints[0]));
        }

        for (i = 0; i < cameraCount; i++)
        {
            CvSize size;
            int tempPointCount = tempPointCount0;
            bool found = false;

            if (!CV_IS_MAT(mats[i]) && !CV_IS_IMAGE(mats[i]))
            {
                assert(0);
                break;
            }

            size = cvGetSize(mats[i]);

            /* Adopt the incoming frame size if it changed. */
            if (size.width != imgSize.width || size.height != imgSize.height)
            {
                imgSize = size;
            }

            /* Lazily (re)create the 8-bit working images to match imgSize. */
            if (!grayImg || grayImg->width != imgSize.width || grayImg->height != imgSize.height)
            {
                cvReleaseMat(&grayImg);
                cvReleaseMat(&tempImg);
                grayImg = cvCreateMat(imgSize.height, imgSize.width, CV_8UC1);
                tempImg = cvCreateMat(imgSize.height, imgSize.width, CV_8UC1);
            }

            if (!storage)
            {
                storage = cvCreateMemStorage();
            }

            switch (etalonType)
            {
            /* NOTE(review): the case label line is missing from this listing
               (chessboard etalon, judging by the handling below). */
                /* Grayscale input is copied, color input converted. */
                if (CV_MAT_CN(cvGetElemType(mats[i])) == 1)
                {
                    cvCopy(mats[i], grayImg);
                }
                else
                {
                    cvCvtColor(mats[i], grayImg, CV_BGR2GRAY);
                }
                /* NOTE(review): the corner-detection call name is missing
                   from this listing; its nonzero result sets `found`.
                   etalonParams[0] x etalonParams[1] is the pattern size. */
                    grayImg,
                    tempImg,
                    storage,
                    cvSize(cvRound(etalonParams[0]), cvRound(etalonParams[1])),
                    latestPoints[i],
                    &tempPointCount) != 0;
                if (found)
                    /* Refine detected corners to sub-pixel accuracy. */
                    cvFindCornerSubPix(
                        grayImg,
                        latestPoints[i],
                        tempPointCount,
                        cvSize(5, 5),
                        cvSize(-1, -1),
                        cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10, 0.1));
                break;
            default:
                assert(0);
                result = false;
                break;
            }

            /* Negative count marks a failed detection for this camera. */
            latestCounts[i] = found ? tempPointCount : -tempPointCount;
            result = result && found;
        }
    }

    if (storage)
    {
        cvClearMemStorage(storage);
    }

    return result;
}
3871
3872bool
3873CvCalibFilter::Push(const CvPoint2D32f** pts)
3874{
3875 bool result = true;
3876 int i, newMaxPoints = etalonPointCount * (MAX(framesAccepted, framesTotal) + 1);
3877
3878 isCalibrated = false;
3879
3880 if (!pts)
3881 {
3882 for (i = 0; i < cameraCount; i++)
3883 if (latestCounts[i] <= 0)
3884 {
3885 return false;
3886 }
3887 pts = (const CvPoint2D32f**)latestPoints;
3888 }
3889
3890 for (i = 0; i < cameraCount; i++)
3891 {
3892 if (!pts[i])
3893 {
3894 assert(0);
3895 break;
3896 }
3897
3898 if (maxPoints < newMaxPoints)
3899 {
3900 CvPoint2D32f* prev = points[i];
3901 cvFree(points + i);
3902 points[i] = (CvPoint2D32f*)cvAlloc(newMaxPoints * sizeof(prev[0]));
3903 memcpy(points[i], prev, maxPoints * sizeof(prev[0]));
3904 }
3905
3907 pts[i],
3908 etalonPointCount * sizeof(points[0][0]));
3909 }
3910
3911 if (maxPoints < newMaxPoints)
3912 {
3913 maxPoints = newMaxPoints;
3914 }
3915
3916 result = i == cameraCount;
3917
3918 if (++framesAccepted >= framesTotal)
3919 {
3920 Stop(true);
3921 }
3922 return result;
3923}
3924
3925bool
3926CvCalibFilter::GetLatestPoints(int idx, CvPoint2D32f** pts, int* count, bool* found)
3927{
3928 int n;
3929
3930 if ((unsigned)idx >= (unsigned)cameraCount || !pts || !count || !found)
3931 {
3932 assert(0);
3933 return false;
3934 }
3935
3936 n = latestCounts[idx];
3937
3938 *found = n > 0;
3939 *count = abs(n);
3940 *pts = latestPoints[idx];
3941
3942 return true;
3943}
3944
3945void
3947{
3948 DrawPoints((CvMat**)dst);
3949}
3950
3951void
3953{
3954 int i, j;
3955
3956 if (!dstarr)
3957 {
3958 assert(0);
3959 return;
3960 }
3961
3962 for (i = 0; i < cameraCount; i++)
3963 {
3964 if (dstarr[i] && latestCounts[i])
3965 {
3966 CvMat dst_stub, *dst;
3967 int count = 0;
3968 bool found = false;
3969 CvPoint2D32f* pts = 0;
3970
3971 GetLatestPoints(i, &pts, &count, &found);
3972
3973 dst = cvGetMat(dstarr[i], &dst_stub);
3974
3975 static const CvScalar line_colors[] = {CvScalar(0, 0, 255),
3976 CvScalar(0, 128, 255),
3977 CvScalar(0, 200, 200),
3978 CvScalar(0, 255, 0),
3979 CvScalar(200, 200, 0),
3980 CvScalar(255, 0, 0),
3981 CvScalar(255, 0, 255)};
3982
3983 const int colorCount = sizeof(line_colors) / sizeof(line_colors[0]);
3984 const int r = 4;
3985 CvScalar color = line_colors[0];
3986 CvPoint prev_pt = {0, 0};
3987
3988 for (j = 0; j < count; j++)
3989 {
3990 CvPoint pt;
3991 pt.x = cvRound(pts[j].x);
3992 pt.y = cvRound(pts[j].y);
3993
3994 if (found)
3995 {
3997 {
3998 color = line_colors[(j / cvRound(etalonParams[0])) % colorCount];
3999 }
4000 else
4001 {
4002 color = CV_RGB(0, 255, 0);
4003 }
4004
4005 if (j != 0)
4006 {
4007 cvLine(dst, prev_pt, pt, color, 1, CV_AA);
4008 }
4009 }
4010
4011 cvLine(
4012 dst, cvPoint(pt.x - r, pt.y - r), cvPoint(pt.x + r, pt.y + r), color, 1, CV_AA);
4013
4014 cvLine(
4015 dst, cvPoint(pt.x - r, pt.y + r), cvPoint(pt.x + r, pt.y - r), color, 1, CV_AA);
4016
4017 cvCircle(dst, pt, r + 1, color, 1, CV_AA);
4018
4019 prev_pt = pt;
4020 }
4021 }
4022 }
4023}
4024
4025/* Get total number of frames and already accepted pair of frames */
4026int
4028{
4029 if (total)
4030 {
4031 *total = framesTotal;
4032 }
4033
4034 return framesAccepted;
4035}
4036
4037/* Get camera parameters for specified camera. If camera is not calibrated
4038 the function returns 0 */
4039const CvCamera*
4041{
4042 if ((unsigned)idx >= (unsigned)cameraCount)
4043 {
4044 assert(0);
4045 return 0;
4046 }
4047
4048 return isCalibrated ? cameraParams + idx : 0;
4049}
4050
4051/* Get camera parameters for specified camera. If camera is not calibrated
4052 the function returns 0 */
4053const CvStereoCamera*
4055{
4056 if (!(isCalibrated && cameraCount == 2))
4057 {
4058 assert(0);
4059 return 0;
4060 }
4061
4062 return &stereo;
4063}
4064
4065/* Sets camera parameters for all cameras */
4066bool
4068{
4069 CvMat mat;
4070 int arrSize;
4071
4072 Stop();
4073
4074 if (!params)
4075 {
4076 assert(0);
4077 return false;
4078 }
4079
4080 arrSize = cameraCount * sizeof(params[0]);
4081
4082 cvInitMatHeader(&mat, 1, cameraCount * (arrSize / sizeof(float)), CV_32FC1, params);
4083 cvCheckArr(&mat, CV_CHECK_RANGE, -10000, 10000);
4084
4085 memcpy(cameraParams, params, arrSize);
4086 isCalibrated = true;
4087
4088 return true;
4089}
4090
4091bool
4093{
4094 if (isCalibrated)
4095 {
4096 int i, j;
4097
4098 FILE* f = fopen(filename, "w");
4099
4100 if (!f)
4101 {
4102 return false;
4103 }
4104
4105 fprintf(f, "%d\n\n", cameraCount);
4106
4107 for (i = 0; i < cameraCount; i++)
4108 {
4109 for (j = 0; j < (int)(sizeof(cameraParams[i]) / sizeof(float)); j++)
4110 {
4111 fprintf(f, "%15.10f ", ((float*)(cameraParams + i))[j]);
4112 }
4113 fprintf(f, "\n\n");
4114 }
4115
4116 /* Save stereo params */
4117
4118 /* Save quad */
4119 for (i = 0; i < 2; i++)
4120 {
4121 for (j = 0; j < 4; j++)
4122 {
4123 fprintf(f, "%15.10f ", stereo.quad[i][j].x);
4124 fprintf(f, "%15.10f ", stereo.quad[i][j].y);
4125 }
4126 fprintf(f, "\n");
4127 }
4128
4129 /* Save coeffs */
4130 for (i = 0; i < 2; i++)
4131 {
4132 for (j = 0; j < 9; j++)
4133 {
4134 fprintf(f, "%15.10lf ", stereo.coeffs[i][j / 3][j % 3]);
4135 }
4136 fprintf(f, "\n");
4137 }
4138
4139
4140 fclose(f);
4141 return true;
4142 }
4143
4144 return true;
4145}
4146
4147bool
4149{
4150 int i, j;
4151 int d = 0;
4152 FILE* f = fopen(filename, "r");
4153
4154 isCalibrated = false;
4155
4156 if (!f)
4157 {
4158 return false;
4159 }
4160
4161 if (fscanf(f, "%d", &d) != 1 || d <= 0 || d > 10)
4162 {
4163 return false;
4164 }
4165
4166 SetCameraCount(d);
4167
4168 for (i = 0; i < cameraCount; i++)
4169 {
4170 for (j = 0; j < (int)(sizeof(cameraParams[i]) / sizeof(float)); j++)
4171 {
4172 int values_read = fscanf(f, "%f", &((float*)(cameraParams + i))[j]);
4173 CV_Assert(values_read == 1);
4174 }
4175 }
4176
4177
4178 /* Load stereo params */
4179
4180 /* load quad */
4181 for (i = 0; i < 2; i++)
4182 {
4183 for (j = 0; j < 4; j++)
4184 {
4185 int values_read = fscanf(f, "%f ", &(stereo.quad[i][j].x));
4186 CV_Assert(values_read == 1);
4187 values_read = fscanf(f, "%f ", &(stereo.quad[i][j].y));
4188 CV_Assert(values_read == 1);
4189 }
4190 }
4191
4192 /* Load coeffs */
4193 for (i = 0; i < 2; i++)
4194 {
4195 for (j = 0; j < 9; j++)
4196 {
4197 int values_read = fscanf(f, "%lf ", &(stereo.coeffs[i][j / 3][j % 3]));
4198 CV_Assert(values_read == 1);
4199 }
4200 }
4201
4202
4203 fclose(f);
4204
4205 stereo.warpSize =
4206 cvSize(cvRound(cameraParams[0].imgSize[0]), cvRound(cameraParams[0].imgSize[1]));
4207
4208 isCalibrated = true;
4209
4210 return true;
4211}
4212
4213bool
4214CvCalibFilter::Rectify(IplImage** srcarr, IplImage** dstarr)
4215{
4216 return Rectify((CvMat**)srcarr, (CvMat**)dstarr);
4217}
4218
4219bool
4220CvCalibFilter::Rectify(CvMat** srcarr, CvMat** dstarr)
4221{
4222 int i;
4223
4224 if (!srcarr || !dstarr)
4225 {
4226 assert(0);
4227 return false;
4228 }
4229
4230 if (isCalibrated && cameraCount == 2)
4231 {
4232 for (i = 0; i < cameraCount; i++)
4233 {
4234 if (srcarr[i] && dstarr[i])
4235 {
4236 IplImage src_stub, *src;
4237 IplImage dst_stub, *dst;
4238
4239 src = cvGetImage(srcarr[i], &src_stub);
4240 dst = cvGetImage(dstarr[i], &dst_stub);
4241
4242 if (src->imageData == dst->imageData)
4243 {
4244 if (!undistImg || undistImg->width != src->width ||
4245 undistImg->height != src->height ||
4246 CV_MAT_CN(undistImg->type) != src->nChannels)
4247 {
4248 cvReleaseMat(&undistImg);
4249 undistImg =
4250 cvCreateMat(src->height, src->width, CV_8U + (src->nChannels - 1) * 8);
4251 }
4252 cvCopy(src, undistImg);
4253 src = cvGetImage(undistImg, &src_stub);
4254 }
4255
4256 cvZero(dst);
4257
4258 if (!rectMap[i][0] || rectMap[i][0]->width != src->width ||
4259 rectMap[i][0]->height != src->height)
4260 {
4261 cvReleaseMat(&rectMap[i][0]);
4262 cvReleaseMat(&rectMap[i][1]);
4263 rectMap[i][0] =
4264 cvCreateMat(stereo.warpSize.height, stereo.warpSize.width, CV_32FC1);
4265 rectMap[i][1] =
4266 cvCreateMat(stereo.warpSize.height, stereo.warpSize.width, CV_32FC1);
4267 cvComputePerspectiveMap(stereo.coeffs[i], rectMap[i][0], rectMap[i][1]);
4268 }
4269 cvRemap(src, dst, rectMap[i][0], rectMap[i][1]);
4270 }
4271 }
4272 }
4273 else
4274 {
4275 for (i = 0; i < cameraCount; i++)
4276 {
4277 if (srcarr[i] != dstarr[i])
4278 {
4279 cvCopy(srcarr[i], dstarr[i]);
4280 }
4281 }
4282 }
4283
4284 return true;
4285}
4286
4287bool
4288CvCalibFilter::Undistort(IplImage** srcarr, IplImage** dstarr)
4289{
4290 return Undistort((CvMat**)srcarr, (CvMat**)dstarr);
4291}
4292
4293bool
4294CvCalibFilter::Undistort(CvMat** srcarr, CvMat** dstarr)
4295{
4296 int i;
4297
4298 if (!srcarr || !dstarr)
4299 {
4300 assert(0);
4301 return false;
4302 }
4303
4304 if (isCalibrated)
4305 {
4306 for (i = 0; i < cameraCount; i++)
4307 {
4308 if (srcarr[i] && dstarr[i])
4309 {
4310 CvMat src_stub, *src;
4311 CvMat dst_stub, *dst;
4312
4313 src = cvGetMat(srcarr[i], &src_stub);
4314 dst = cvGetMat(dstarr[i], &dst_stub);
4315
4316 if (src->data.ptr == dst->data.ptr)
4317 {
4318 if (!undistImg || undistImg->width != src->width ||
4319 undistImg->height != src->height || CV_ARE_TYPES_EQ(undistImg, src))
4320 {
4321 cvReleaseMat(&undistImg);
4322 undistImg = cvCreateMat(src->height, src->width, src->type);
4323 }
4324
4325 cvCopy(src, undistImg);
4326 src = undistImg;
4327 }
4328
4329#if 1
4330 {
4331 CvMat A = cvMat(3, 3, CV_32FC1, cameraParams[i].matrix);
4332 CvMat k = cvMat(1, 4, CV_32FC1, cameraParams[i].distortion);
4333
4334 if (!undistMap[i][0] || undistMap[i][0]->width != src->width ||
4335 undistMap[i][0]->height != src->height)
4336 {
4337 cvReleaseMat(&undistMap[i][0]);
4338 cvReleaseMat(&undistMap[i][1]);
4339 undistMap[i][0] = cvCreateMat(src->height, src->width, CV_32FC1);
4340 undistMap[i][1] = cvCreateMat(src->height, src->width, CV_32FC1);
4341 cvInitUndistortMap(&A, &k, undistMap[i][0], undistMap[i][1]);
4342 }
4343
4344 cvRemap(src, dst, undistMap[i][0], undistMap[i][1]);
4345#else
4346 cvUndistort2(src, dst, &A, &k);
4347#endif
4348 }
4349 }
4350 }
4351 }
4352
4353 else
4354 {
4355 for (i = 0; i < cameraCount; i++)
4356 {
4357 if (srcarr[i] != dstarr[i])
4358 {
4359 cvCopy(srcarr[i], dstarr[i]);
4360 }
4361 }
4362 }
4363
4364
4365 return true;
4366}
#define float
Definition 16_Level.h:22
class A(deque< T, A >)) ARMARX_OVERLOAD_STD_HASH_FOR_ITERABLE((class T
Enables hashing of std::list.
constexpr T c
int icvConvertPointSystem(CvPoint3D64d M2, CvPoint3D64d *M1, CvMatr64d rotMatr, CvMatr64d transVect)
int icvConvertWarpCoordinates(double coeffs[3][3], CvPoint2D32f *cameraPoint, CvPoint2D32f *warpPoint, int direction)
void cvProjectPointsSimple(int point_count, CvPoint3D64f *_object_points, double *_rotation_matrix, double *_translation_vector, double *_camera_matrix, double *_distortion, CvPoint2D64f *_image_points)
void icvGetQuadsTransform(CvSize imageSize, CvMatr64d camMatr1, CvMatr64d rotMatr1, CvVect64d transVect1, CvMatr64d camMatr2, CvMatr64d rotMatr2, CvVect64d transVect2, CvSize *warpSize, double quad1[4][2], double quad2[4][2], CvMatr64d fundMatr, CvPoint3D64d *epipole1, CvPoint3D64d *epipole2)
CV_IMPL void cvComputePerspectiveMap(const double c[3][3], CvArr *rectMapX, CvArr *rectMapY)
int icvGetAngleLine(CvPoint2D64d startPoint, CvSize imageSize, CvPoint2D64d *point1, CvPoint2D64d *point2)
float * CvVect32f
void icvGetPieceLength3D(CvPoint3D64d point1, CvPoint3D64d point2, double *dist)
void icvGetCrossDirectDirect(CvVect64d direct1, CvVect64d direct2, CvPoint2D64d *cross, int *result)
float * CvMatr32f
void icvGetCrossPieceDirect(CvPoint2D64d p_start, CvPoint2D64d p_end, double a, double b, double c, CvPoint2D64d *cross, int *result)
#define __END__
CV_IMPL void cvInitPerspectiveTransform(CvSize size, const CvPoint2D32f quad[4], double matrix[3][3], CvArr *rectMap)
double * CvMatr64d
CV_INLINE void icvInvertMatrix_64d(double *A, int n, double *invA)
CV_INLINE void icvSubVector_64d(const double *src1, const double *src2, double *dst, int len)
int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr, CvPoint3D32f *epipole1, CvPoint3D32f *epipole2)
void icvGetQuadsTransformStruct(CvStereoCamera *stereoCamera)
void icvGetPieceLength(CvPoint2D64d point1, CvPoint2D64d point2, double *dist)
void icvGetMiddleAnglePoint(CvPoint2D64d basePoint, CvPoint2D64d point1, CvPoint2D64d point2, CvPoint2D64d *midPoint)
#define icvCheckVector_64f(ptr, len)
double cvPseudoInverse(const CvArr *src, CvArr *dst)
int icvComputeCoeffForStereo(CvStereoCamera *stereoCamera)
void icvTestPoint(CvPoint2D64d testPoint, CvVect64d line1, CvVect64d line2, CvPoint2D64d basePoint, int *result)
int icvComputeStereoLineCoeffs(CvPoint3D64d pointA, CvPoint3D64d pointB, CvPoint3D64d pointCam1, double gamma, CvStereoLineCoeff *coeffs)
void icvComputeeInfiniteProject1(CvMatr64d rotMatr, CvMatr64d camMatr1, CvMatr64d camMatr2, CvPoint2D32f point1, CvPoint2D32f *point2)
int cvConvertEssential2Fundamental(CvMatr32f essMatr, CvMatr32f fundMatr, CvMatr32f cameraMatr1, CvMatr32f cameraMatr2)
CV_INLINE void icvMulMatrix_64d(const double *src1, int w1, int h1, const double *src2, int w2, int h2, double *dst)
#define icvCheckVector_32f(ptr, len)
int icvGetCrossLines(CvPoint3D64d point11, CvPoint3D64d point12, CvPoint3D64d point21, CvPoint3D64d point22, CvPoint3D64d *midPoint)
CV_INLINE CvSize cvGetMatSize(const CvMat *mat)
int icvGetDirectionForPoint(CvPoint2D64d point, CvMatr64d camMatr, CvPoint3D64d *direct)
void icvGetCrossRectDirect(CvSize imageSize, double a, double b, double c, CvPoint2D64d *start, CvPoint2D64d *end, int *result)
CvPoint2D64f CvPoint2D64d
int icvGetCrossPieceVector(CvPoint2D32f p1_start, CvPoint2D32f p1_end, CvPoint2D32f v2_start, CvPoint2D32f v2_end, CvPoint2D32f *cross)
int icvCompute3DPoint(double alpha, double betta, CvStereoLineCoeff *coeffs, CvPoint3D64d *point)
#define __BEGIN__
CvStatus
@ CV_DIV_BY_ZERO_ERR
@ CV_BADSIZE_ERR
@ CV_BADSCALE_ERR
@ CV_NOTDEFINED_ERR
@ CV_BADRANGE_ERR
@ CV_UNSUPPORTED_CHANNELS_ERR
@ CV_BADCHANNELS_ERR
@ CV_BADCOEF_ERR
@ CV_BADDEPTH_ERR
@ CV_NOTFOUND_ERR
@ CV_OK
@ CV_UNSUPPORTED_FORMAT_ERR
@ CV_UNMATCHED_ROI_ERR
@ CV_BADPOINT_ERR
@ CV_UNSUPPORTED_COI_ERR
@ CV_BADARG_ERR
@ CV_BADFACTOR_ERR
@ CV_BADHEADER_ERR
@ CV_UNMATCHED_FORMATS_ERR
@ CV_BADSTEP_ERR
@ CV_UNSUPPORTED_DEPTH_ERR
@ CV_OUTOFMEM_ERR
@ CV_BADFLAG_ERR
@ CV_NULLPTR_ERR
@ CV_BADROI_ERR
@ CV_NO_ERR
@ CV_BADMEMBLOCK_ERR
@ CV_INPLACE_NOT_SUPPORTED_ERR
@ CV_BADCONVERGENCE_ERR
int icvStereoCalibration(int numImages, int *nums, CvSize imageSize, CvPoint2D32f *imagePoints1, CvPoint2D32f *imagePoints2, CvPoint3D32f *objectPoints, CvStereoCamera *stereoparams)
int cvComputeEssentialMatrix(CvMatr32f rotMatr, CvMatr32f transVect, CvMatr32f essMatr)
int icvGetSymPoint3D(CvPoint3D64d pointCorner, CvPoint3D64d point1, CvPoint3D64d point2, CvPoint3D64d *pointSym2)
int icvComCoeffForLine(CvPoint2D64d point1, CvPoint2D64d point2, CvPoint2D64d point3, CvPoint2D64d point4, CvMatr64d camMatr1, CvMatr64d rotMatr1, CvMatr64d transVect1, CvMatr64d camMatr2, CvMatr64d rotMatr2, CvMatr64d transVect2, CvStereoLineCoeff *coeffs, int *needSwapCamera)
CV_IMPL IplImage * icvCreateIsometricImage(IplImage *src, IplImage *dst, int desired_depth, int desired_num_channels)
int icvComputeRestStereoParams(CvStereoCamera *stereoparams)
CvPoint3D64f CvPoint3D64d
#define cvmMul(src1, src2, dst)
void icvGetCutPiece(CvVect64d areaLineCoef1, CvVect64d areaLineCoef2, CvPoint2D64d epipole, CvSize imageSize, CvPoint2D64d *point11, CvPoint2D64d *point12, CvPoint2D64d *point21, CvPoint2D64d *point22, int *result)
void icvProjectPointToImage(CvPoint3D64d point, CvMatr64d camMatr, CvMatr64d rotMatr, CvVect64d transVect, CvPoint2D64d *projPoint)
double * CvVect64d
#define EPS64D
CV_IMPL double icvGetVect(CvPoint2D64d basePoint, CvPoint2D64d point1, CvPoint2D64d point2)
CV_INLINE void icvTransposeMatrix_64d(const double *src, int w, int h, double *dst)
float icvDefinePointPosition(CvPoint2D32f point1, CvPoint2D32f point2, CvPoint2D32f point)
void icvGetCoefForPiece(CvPoint2D64d p_start, CvPoint2D64d p_end, double *a, double *b, double *c, int *result)
void icvGetCrossPiecePiece(CvPoint2D64d p1_start, CvPoint2D64d p1_end, CvPoint2D64d p2_start, CvPoint2D64d p2_end, CvPoint2D64d *cross, int *result)
CV_INLINE void icvMulMatrix_32f(const float *src1, int w1, int h1, const float *src2, int w2, int h2, float *dst)
#define icvTransformVector_64d(matr, src, dst, w, h)
int icvCreateConvertMatrVect(CvMatr64d rotMatr1, CvMatr64d transVect1, CvMatr64d rotMatr2, CvMatr64d transVect2, CvMatr64d convRotMatr, CvMatr64d convTransVect)
void icvProjectPointToDirect(CvPoint2D64d point, CvVect64d lineCoeff, CvPoint2D64d *projectPoint)
void icvGetDistanceFromPointToDirect(CvPoint2D64d point, CvVect64d lineCoef, double *dist)
void icvComputeeInfiniteProject2(CvMatr64d rotMatr, CvMatr64d camMatr1, CvMatr64d camMatr2, CvPoint2D32f *point1, CvPoint2D32f point2)
@ CV_MAT64D
@ CV_MAT3x1_64D
@ CV_MAT4x4_32F
@ CV_MAT3x3_32F
@ CV_MAT3x3_64D
@ CV_MAT32F
@ CV_MAT4x1_64D
@ CV_MAT3x1_32F
@ CV_MAT4x4_64D
@ CV_MAT4x1_32F
int icvGetCrossLineDirect(CvPoint2D32f p1, CvPoint2D32f p2, float a, float b, float c, CvPoint2D32f *cross)
void icvGetNormalDirect(CvVect64d direct, CvPoint2D64d point, CvVect64d normDirect)
int cvFindChessBoardCornerGuesses(const void *arr, void *, CvMemStorage *, CvSize pattern_size, CvPoint2D32f *corners, int *corner_count)
void icvComputeStereoParamsForCameras(CvStereoCamera *stereoCamera)
CV_INLINE void icvAddVector_64d(const double *src1, const double *src2, double *dst, int len)
CvCalibEtalonType
Definition calibfilter.h:69
@ CV_CALIB_ETALON_USER
Definition calibfilter.h:70
@ CV_CALIB_ETALON_CHESSBOARD
Definition calibfilter.h:71
#define CV_WARP_TO_CAMERA
Definition calibfilter.h:62
#define CV_CAMERA_TO_WARP
Definition calibfilter.h:61
int latestCounts[MAX_CAMERAS]
CvPoint2D32f * points[MAX_CAMERAS]
virtual bool SaveCameraParams(const char *filename)
CvMat * undistImg
CvMat * tempImg
virtual bool SetFrames(int totalFrames)
CvMat * grayImg
virtual void Stop(bool calibrate=false)
virtual bool SetCameraParams(CvCamera *params)
virtual bool Undistort(IplImage **src, IplImage **dst)
CvCalibEtalonType etalonType
virtual bool Rectify(IplImage **srcarr, IplImage **dstarr)
virtual bool Push(const CvPoint2D32f **points=0)
virtual bool FindEtalon(IplImage **imgs)
CvMat * undistMap[MAX_CAMERAS][2]
virtual const CvCamera * GetCameraParams(int idx=0) const
CvPoint2D32f * latestPoints[MAX_CAMERAS]
CvPoint2D32f * etalonPoints
CvStereoCamera stereo
virtual const CvStereoCamera * GetStereoParams() const
virtual void DrawPoints(IplImage **dst)
virtual void SetCameraCount(int cameraCount)
virtual CvCalibEtalonType GetEtalon(int *paramCount=0, const double **etalonParams=0, int *pointCount=0, const CvPoint2D32f **etalonPoints=0) const
CvMat * rectMap[MAX_CAMERAS][2]
virtual ~CvCalibFilter()
CvMemStorage * storage
virtual bool LoadCameraParams(const char *filename)
virtual int GetFrameCount(int *framesTotal=0) const
virtual bool SetEtalon(CvCalibEtalonType etalonType, double *etalonParams, int pointCount=0, CvPoint2D32f *points=0)
CvCamera cameraParams[MAX_CAMERAS]
double * etalonParams
virtual bool GetLatestPoints(int idx, CvPoint2D32f **pts, int *count, bool *found)
VectorXD< D, T > sqrt(const VectorXD< D, T > &a)
Definition VectorXD.h:704
This file offers overloads of toIce() and fromIce() functions for STL container types.
double distance(const Point &a, const Point &b)
Definition point.hpp:95
Point cross(const Point &x, const Point &y)
Definition point.hpp:35