I'm not sure whether it is completely correct, but it gives acceptable results.
Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
detectorParams->doCornerRefinement = true;
Ptr<aruco::Dictionary> dictionary = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(kMarkerDictionaryID));

Mat camMatrix, distCoeffs;
if (!readCameraParameters(kCameraParametersFile, camMatrix, distCoeffs)) {
    // ...
    return;
}

VideoCapture inputVideo;
inputVideo.open(kCameraID);

while (inputVideo.grab()) {
    Mat image;
    inputVideo.retrieve(image);

    vector<int> ids;
    vector< vector<Point2f> > corners;
    vector<Vec3d> rvecs, tvecs;

    aruco::detectMarkers(image, dictionary, corners, ids, detectorParams);
    if (ids.size() > 0) {
        aruco::estimatePoseSingleMarkers(corners, kMarkerLengthInMeters, camMatrix, distCoeffs, rvecs, tvecs);

        for (unsigned int i = 0; i < ids.size(); i++) {
            // if (ids[i] != kMarkerID)
            //     continue;

            // Compute the camera position in the marker's coordinate system
            Mat R;
            Rodrigues(rvecs[i], R);
            Mat cameraPose = -R.t() * (Mat)tvecs[i];   // 3x1 column vector
            double x = cameraPose.at<double>(0, 0);
            double y = cameraPose.at<double>(1, 0);
            double z = cameraPose.at<double>(2, 0);
            // Here z is the camera's distance from the marker plane along the marker's normal.
            // Or, if you need offsets that do not depend on the marker's rotation, use the
            // translation vector directly (marker position in camera coordinates):
            // x = tvecs[i][0];
            // y = tvecs[i][1];
            // z = tvecs[i][2];
            cout << "X: " << x << " Y: " << y << " Z: " << z << endl;

            aruco::drawAxis(image, camMatrix, distCoeffs, rvecs[i], tvecs[i], kMarkerLengthInMeters * 0.5f);
        }
        aruco::drawDetectedMarkers(image, corners, ids);
    }

    resize(image, image, Size(image.cols / 2, image.rows / 2));
    imshow("out", image);

    char key = (char)waitKey(kWaitTimeInmS);
    if (key == 27) break;
}
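readCameraParameters is not part of the OpenCV API; it is a small helper (the ArUco sample programs ship a similar one) that loads the intrinsic matrix and distortion coefficients from a calibration file. A minimal sketch, assuming the file is a YAML/XML file written by OpenCV's calibration tools with the keys "camera_matrix" and "distortion_coefficients"; adjust the key names if your file uses different ones:

#include <opencv2/core.hpp>

// Hypothetical helper: loads camera intrinsics from a calibration file.
// The key names below are assumptions based on the OpenCV calibration samples.
static bool readCameraParameters(const std::string &filename, cv::Mat &camMatrix, cv::Mat &distCoeffs) {
    cv::FileStorage fs(filename, cv::FileStorage::READ);
    if (!fs.isOpened())
        return false;
    fs["camera_matrix"] >> camMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    return true;
}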