Visual Servoing Platform version 3.3.0

Excerpted listing: eye-to-hand visual servoing of a point with the Afma6 gantry robot. Dots are tracked in images grabbed from a firewire camera, the object pose is estimated from the tracked points, and the resulting velocity is sent to the robot. The number that prefixes each code line is its line number in the full source file; gaps in the numbering indicate elided code.
62 #include <visp3/core/vpConfig.h>
63 #include <visp3/core/vpDebug.h>
64 #if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394))
68 #include <visp3/blob/vpDot.h>
69 #include <visp3/core/vpDisplay.h>
70 #include <visp3/core/vpException.h>
71 #include <visp3/core/vpHomogeneousMatrix.h>
72 #include <visp3/core/vpImage.h>
73 #include <visp3/core/vpImagePoint.h>
74 #include <visp3/core/vpMath.h>
75 #include <visp3/core/vpPoint.h>
76 #include <visp3/gui/vpDisplayGTK.h>
77 #include <visp3/gui/vpDisplayOpenCV.h>
78 #include <visp3/gui/vpDisplayX.h>
79 #include <visp3/io/vpImageIo.h>
80 #include <visp3/robot/vpRobotAfma6.h>
81 #include <visp3/sensor/vp1394TwoGrabber.h>
82 #include <visp3/vision/vpPose.h>
83 #include <visp3/visual_features/vpFeatureBuilder.h>
84 #include <visp3/visual_features/vpFeaturePoint.h>
85 #include <visp3/vs/vpServo.h>
86 #include <visp3/vs/vpServoDisplay.h>
108 vpDisplayX display(I, 100, 100, "Current image");
109 #elif defined(VISP_HAVE_OPENCV)
111 #elif defined(VISP_HAVE_GTK)
118 std::cout << std::endl;
119 std::cout << "-------------------------------------------------------" << std::endl;
120 std::cout << " Test program for vpServo " << std::endl;
121 std::cout << " Eye-to-hand task control" << std::endl;
122 std::cout << " Simulation " << std::endl;
123 std::cout << " task : servo a point " << std::endl;
124 std::cout << "-------------------------------------------------------" << std::endl;
125 std::cout << std::endl;
132 for (i = 0; i < nbPoint; i++) {
153 robot.getCameraParameters(cam, I);
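
Line 153 reads the camera intrinsics, which are needed to turn the tracked dot positions in pixels into the normalized image-plane coordinates used by the point features. A minimal sketch of that pixel-to-meter conversion, with hypothetical intrinsic values (the real ones come from the robot's calibration):

  #include <iostream>
  #include <visp3/core/vpCameraParameters.h>
  #include <visp3/core/vpPixelMeterConversion.h>

  int main()
  {
    // hypothetical intrinsics: px, py (ratio focal/pixel size), u0, v0 (principal point)
    vpCameraParameters cam(800.0, 800.0, 320.0, 240.0);

    double u = 350.0, v = 210.0; // a pixel position, e.g. a dot centre of gravity
    double x = 0.0, y = 0.0;     // normalized coordinates x = X/Z, y = Y/Z
    vpPixelMeterConversion::convertPoint(cam, u, v, x, y);

    std::cout << "x = " << x << ", y = " << y << std::endl;
    return 0;
  }
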
158 for (i = 0; i < nbPoint; i++) {
171 std::cout << cMo << std::endl;
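
Line 171 prints the pose cMo estimated from the tracked points. A minimal, self-contained sketch of a pose-from-points computation with vpPose; the point coordinates and pose below are illustrative, not the example's target geometry:

  #include <iostream>
  #include <visp3/core/vpHomogeneousMatrix.h>
  #include <visp3/core/vpPoint.h>
  #include <visp3/vision/vpPose.h>

  int main()
  {
    // four coplanar points expressed in the object frame (10 cm square)
    vpPoint P[4] = {vpPoint(-0.05, -0.05, 0), vpPoint(0.05, -0.05, 0),
                    vpPoint(0.05, 0.05, 0), vpPoint(-0.05, 0.05, 0)};

    // ground-truth pose used here to synthesize the image-plane coordinates;
    // in the example they come from the tracked dots instead
    vpHomogeneousMatrix cMo_truth(0.05, -0.05, 0.5, 0, 0, 0);

    vpPose pose;
    for (int i = 0; i < 4; i++) {
      P[i].project(cMo_truth); // fills the normalized coordinates x = X/Z, y = Y/Z
      pose.addPoint(P[i]);
    }

    vpHomogeneousMatrix cMo;
    pose.computePose(vpPose::DEMENTHON_VIRTUAL_VS, cMo); // initial guess refined by virtual visual servoing
    std::cout << cMo << std::endl;
    return 0;
  }
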
179 std::cout << " Learning 0/1 " << std::endl;
180 char name[FILENAME_MAX];
181 sprintf(name, "cdMo.dat");
183 std::cin >> learning;
186 vpTRACE("Save the location of the object in a file cdMo.dat");
187 std::ofstream f(name);
194 vpTRACE("Loading desired location from cdMo.dat");
195 std::ifstream f("cdMo.dat");
204 for (i = 0; i < nbPoint; i++) {
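
Lines 179 to 195 either learn the desired pose cdMo and save it to cdMo.dat, or reload a previously learned pose from that file, before the desired features are built. A minimal sketch of that save/load round trip with vpHomogeneousMatrix:

  #include <fstream>
  #include <iostream>
  #include <visp3/core/vpHomogeneousMatrix.h>

  int main()
  {
    vpHomogeneousMatrix cdMo; // desired camera-to-object pose (identity here)

    {
      std::ofstream f("cdMo.dat");
      cdMo.save(f); // write the 4x4 matrix to the file
    }
    {
      std::ifstream f("cdMo.dat");
      cdMo.load(f); // read it back
    }
    std::cout << cdMo << std::endl;
    return 0;
  }
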
216 vpTRACE("\t we want an eye-in-hand control law");
217 vpTRACE("\t robot is controlled in the camera frame");
221 for (i = 0; i < nbPoint; i++) {
225 vpTRACE("Display task information ");
230 double convergence_threshold = 0.00;
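
Lines 216 to 225 configure the visual servoing task, add one point feature per tracked dot and print the task information, while line 230 sets the convergence threshold that stops the loop. A minimal sketch of a comparable task setup; the servo type, interaction matrix choice and number of points below are assumptions made for illustration:

  #include <visp3/visual_features/vpFeaturePoint.h>
  #include <visp3/vs/vpServo.h>

  int main()
  {
    const unsigned int nbPoint = 4; // hypothetical number of tracked dots
    vpFeaturePoint p[nbPoint], pd[nbPoint]; // current and desired features

    // p[i] would be built from the tracked dots, pd[i] from the points
    // projected at the learned pose cdMo
    vpServo task;
    task.setServo(vpServo::EYETOHAND_L_cVe_eJe); // eye-to-hand control law
    task.setInteractionMatrixType(vpServo::CURRENT);
    for (unsigned int i = 0; i < nbPoint; i++)
      task.addFeature(p[i], pd[i]);

    task.print(); // display the task information
    task.kill();
    return 0;
  }
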
235 unsigned int iter = 0;
242 oMcamrobot[0][3] = -0.05;
247 double lambda_av = 0.1;
251 std::cout << "alpha 0.7" << std::endl;
253 std::cout << "beta 5" << std::endl;
255 std::list<vpImagePoint> Lcog;
257 while (error > convergence_threshold) {
258 std::cout << "---------------------------------------------" << iter++ << std::endl;
269 for (i = 0; i < nbPoint; i++) {
271 Lcog.push_back(dot[i].getCog());
274 vpTRACE("Error detected while tracking visual features");
282 for (i = 0; i < nbPoint; i++) {
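
In the loop body above (lines 269 to 274) each dot is tracked in the newly acquired image and its centre of gravity is pushed into Lcog, with a trace emitted if tracking fails; the loop at line 282 then updates the current features from the tracked dots. A minimal sketch of vpDot tracking, run on a synthetic image since the example's frames come from the firewire grabber:

  #include <iostream>
  #include <visp3/blob/vpDot.h>
  #include <visp3/core/vpImage.h>
  #include <visp3/core/vpImagePoint.h>

  int main()
  {
    vpImage<unsigned char> I(240, 320, 0);       // black image
    for (unsigned int i = 100; i < 120; i++)     // draw a 20x20 white blob
      for (unsigned int j = 150; j < 170; j++)
        I[i][j] = 255;

    vpDot d;
    d.initTracking(I, vpImagePoint(110, 160)); // seed point inside the blob
    d.track(I);                                // connected-component tracking
    std::cout << "cog: " << d.getCog() << std::endl;
    return 0;
  }
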
306 robot.get_cMe(camrobotMe);
307 cMe = cMo * oMcamrobot * camrobotMe;
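
Lines 306 and 307 compose the estimated pose cMo, the fixed offset oMcamrobot set at line 242 and the transform camrobotMe returned by robot.get_cMe() into cMe, from which the velocity twist matrix used by the eye-to-hand task (set_cVe) can be built. A minimal sketch of that composition with identity placeholder transforms:

  #include <iostream>
  #include <visp3/core/vpHomogeneousMatrix.h>
  #include <visp3/core/vpVelocityTwistMatrix.h>

  int main()
  {
    vpHomogeneousMatrix cMo, oMcamrobot, camrobotMe; // identity placeholders
    oMcamrobot[0][3] = -0.05; // same translation offset as line 242

    vpHomogeneousMatrix cMe = cMo * oMcamrobot * camrobotMe; // compose the frames
    vpVelocityTwistMatrix cVe(cMe); // 6x6 twist transformation built from cMe

    std::cout << cVe << std::endl;
    return 0;
  }
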
318 if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
321 gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av;
328 vpTRACE("%f %f %f %f %f", alpha, beta, lambda_av, (task.getError()).sumSquare(), gain);
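
Lines 318 to 328 compute an adaptive gain: with alpha = 0.7, beta = 5 and lambda_av = 0.1 (lines 247 to 253), the gain stays close to lambda_av while the feature error is large and rises towards alpha + lambda_av near convergence, avoiding large velocities at the start of the servo. A minimal sketch of that gain schedule:

  #include <cmath>
  #include <cstdio>

  // gain = alpha * exp(-beta * ||e||^2) + lambda_av, as at line 321
  double adaptiveGain(double error_sq, double alpha = 0.7, double beta = 5.0, double lambda_av = 0.1)
  {
    return alpha * std::exp(-beta * error_sq) + lambda_av;
  }

  int main()
  {
    const double samples[] = {1.0, 0.1, 0.01, 0.0};
    for (double e2 : samples)
      std::printf("||e||^2 = %5.2f -> gain = %.3f\n", e2, adaptiveGain(e2));
    return 0;
  }
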
334 for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog) {
340 error = (task.getError()).sumSquare();
341 std::cout << "|| s - s* || = " << error << std::endl;
344 vpTRACE("Error detected while tracking visual features");
353 if ((SAVE == 1) && (iter % 3 == 0)) {
356 sprintf(name, "/tmp/marchand/image.%04d.ppm", it++);
367 std::cout << "Test failed with exception: " << e << std::endl;
375 std::cout << "You do not have an afma6 robot connected to your computer..." << std::endl;
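
Finally, when SAVE is enabled, every third iteration writes the current image to disk (lines 353 to 356 of the listing). A minimal sketch of that image saving with vpImageIo, using a synthetic gray image and an illustrative output path in place of the grabbed frame and the /tmp/marchand directory:

  #include <cstdio>
  #include <visp3/core/vpImage.h>
  #include <visp3/io/vpImageIo.h>

  int main()
  {
    vpImage<unsigned char> I(480, 640, 128); // uniform gray test image
    char name[FILENAME_MAX];
    for (unsigned int it = 0; it < 3; it++) {
      std::snprintf(name, FILENAME_MAX, "/tmp/image.%04u.ppm", it); // numbered file name
      vpImageIo::write(I, name);                                    // save as PPM
    }
    return 0;
  }
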