UsTK: Ultrasound ToolKit, version 2.0.1 (under development, 2025-01-22)
tutorial-servo-target-confidence.cpp
#include <iostream>

#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpMutex.h>
#include <visp3/core/vpThread.h>
#include <visp3/core/vpTime.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpPlot.h>
#include <visp3/robot/vpRobotViper850.h>
#include <visp3/sensor/vpV4l2Grabber.h>

#include <visp3/ustk_core/usImagePostScan2D.h>
#include <visp3/ustk_core/usImagePreScan2D.h>
#include <visp3/ustk_core/usPixelMeterConversion.h>
#include <visp3/ustk_core/usPostScanToPreScan2DConverter.h>
#include <visp3/ustk_core/usPreScanToPostScan2DConverter.h>

#include <visp3/ustk_confidence_map/usScanlineConfidence2D.h>

#include <visp3/ustk_template_tracking/usDenseTracker2D.h>

#if defined(VISP_HAVE_V4L2) && defined(VISP_HAVE_PTHREAD) && defined(VISP_HAVE_VIPER850)

// Shared vars
typedef enum { capture_waiting, capture_started, capture_stopped } t_CaptureState;
t_CaptureState s_capture_state = capture_waiting;
vpImage<unsigned char> s_frame;
vpMutex s_mutex_capture;

typedef enum { ROI_waiting, ROI_started } t_stateInitROI;
t_stateInitROI s_ROI_state = ROI_waiting;
vpMutex s_mutex_ROI_state;

bool s_robotIsRunning;
vpMutex s_mutex_control_velocity;
vpColVector s_controlVelocity(6, 0.);
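
// This tutorial runs three threads that communicate only through the
// mutex-protected variables above:
//  - captureFunction() grabs ultrasound frames with V4L2 and publishes
//    them in s_frame;
//  - displayFunction() tracks the target, computes the confidence map
//    and writes the resulting probe velocity into s_controlVelocity;
//  - controlFunction() combines that velocity with a force control law
//    and sends joint velocities to the Viper 850 robot.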

vpThread::Return captureFunction(vpThread::Args args)
{
  vpV4l2Grabber cap = *((vpV4l2Grabber *)args);
  vpImage<unsigned char> frame_;
  bool stop_capture_ = false;

  cap.open(frame_);

  vpRect roi(vpImagePoint(55, 70), vpImagePoint(410, 555)); // ROI to remove the Sonosite banners

  while (!stop_capture_) {
    // Capture in progress
    cap.acquire(frame_, roi); // get a new frame from the camera

    // Update shared data
    {
      vpMutex::vpScopedLock lock(s_mutex_capture);
      if (s_capture_state == capture_stopped)
        stop_capture_ = true;
      else
        s_capture_state = capture_started;
      s_frame = frame_;
    }
  }

  {
    vpMutex::vpScopedLock lock(s_mutex_capture);
    s_capture_state = capture_stopped;
  }
  std::cout << "End of capture thread" << std::endl;
  return 0;
}
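
// Note on the capture protocol: s_capture_state acts as a small state
// machine (capture_waiting -> capture_started -> capture_stopped). Any
// thread may request termination by setting it to capture_stopped; the
// capture thread confirms the state here before exiting.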

vpThread::Return displayFunction(vpThread::Args args)
{
  (void)args; // Avoid warning: unused parameter args
  usImagePostScan2D<unsigned char> postScan_;
  usImagePreScan2D<unsigned char> preScan_;
  usImagePostScan2D<unsigned char> confidencePostScan_;
  usImagePreScan2D<unsigned char> confidencePreScan_;
  usPostScanToPreScan2DConverter backConverter_;
  usPreScanToPostScan2DConverter converter_;
  usScanlineConfidence2D confidenceMapProcessor_;
  usDenseTracker2D tracker;
  vpRectOriented rectangle;
  bool rectangleRoiDefined = false;
  t_stateInitROI stateROI = ROI_waiting;
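
  // The settings below describe the Sonosite C60 convex transducer
  // (radius 60 mm, 128 scan lines, 57 deg field of view, 12 cm depth).
  // They are needed by the scan converters and by
  // usPixelMeterConversion::convert() to relate pixels to meters.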
  postScan_.setTransducerRadius(0.060);
  postScan_.setTransducerConvexity(true);
  postScan_.setScanLineNumber(128);
  postScan_.setFieldOfView(vpMath::rad(57.0)); // field of view is 57 deg
  postScan_.setDepth(0.12);
  postScan_.setProbeName("Sonosite C60");

  t_CaptureState capture_state_;
  bool display_initialized_ = false;

#if defined(VISP_HAVE_X11)
  vpDisplayX *dpost_scan_ = NULL;            // display post-scan image
  vpDisplayX *dpost_scan_confidence_ = NULL; // display post-scan confidence map
#endif

  vpPlot plot(2);
  plot.initGraph(0, 1);
  plot.setTitle(0, "X target error");
  plot.setUnitY(0, "error");
  plot.setLegend(0, 0, "time");

  plot.initGraph(1, 1);
  plot.setTitle(1, "confidence barycenter error");
  plot.setUnitY(1, "error");
  plot.setLegend(1, 0, "time");

  // BUG :
  // plot.initRange(0, 0, 10, -0.01, 0.01);
  // plot.initRange(1, 0, 10, 5.0, 5.0);

  double xtarget;
  double ytarget;

  vpImagePoint ROI_centralPoint;

  double startTime = vpTime::measureTimeMs();

  do {
    s_mutex_capture.lock();
    capture_state_ = s_capture_state;
    s_mutex_capture.unlock();

    // Check if a frame is available
    if (capture_state_ == capture_started) {
      // Create a copy of the captured frame
      {
        vpMutex::vpScopedLock lock(s_mutex_capture);
        postScan_.setData(s_frame);
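        // The resolution is the physical size (in meters) of the post-scan
        // image divided by its size in pixels. With a convex probe the
        // scanned region height is
        //   depth + r * (1 - cos(FOV / 2)),
        // where r is the transducer radius; here the same height-based
        // value is used for both the height and the width resolution.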
        postScan_.setHeightResolution(
            (postScan_.getDepth() + postScan_.getTransducerRadius() * (1 - cos(postScan_.getFieldOfView() / 2.0))) /
            postScan_.getHeight());
        postScan_.setWidthResolution(
            (postScan_.getDepth() + postScan_.getTransducerRadius() * (1 - cos(postScan_.getFieldOfView() / 2.0))) /
            postScan_.getHeight());
      }

      // target init (wait for ROI definition)
      if (rectangleRoiDefined && stateROI == ROI_waiting) {
        tracker.init(postScan_, rectangle);
        s_mutex_ROI_state.lock();
        s_ROI_state = ROI_started;
        s_mutex_ROI_state.unlock();
        stateROI = ROI_started;
      }

      // confidence
      backConverter_.convert(postScan_, preScan_, 480);
      // Compute the confidence map on the pre-scan image
      confidenceMapProcessor_.run(confidencePreScan_, preScan_);
      // Convert the computed confidence map to post-scan
      converter_.convert(confidencePreScan_, confidencePostScan_);

      // if the ROI is defined, we can send commands
      if (stateROI == ROI_started) {
        tracker.update(postScan_);
        rectangle = tracker.getTarget();

        usPixelMeterConversion::convert(postScan_, rectangle.getCenter().get_j(), rectangle.getCenter().get_i(),
                                        xtarget, ytarget);

        double ttarget = atan2(xtarget, ytarget);

        double time = (vpTime::measureTimeMs() - startTime) / 1000.0;

        unsigned int height(confidencePreScan_.getHeight()), width(confidencePreScan_.getWidth());

        double I_sum = 0.0;
        for (unsigned int i = 0; i < height; ++i)
          for (unsigned int j = 0; j < width; ++j)
            I_sum += static_cast<double>(confidencePreScan_[i][j]);

        double yc = 0.0;
        for (unsigned int x = 0; x < height; ++x)
          for (unsigned int y = 0; y < width; ++y) {
            yc += y * confidencePreScan_[x][y];
          }
        yc /= I_sum;

        double tc = yc * confidencePreScan_.getScanLinePitch() - confidencePreScan_.getFieldOfView() / 2.0;
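
        // yc is the confidence-weighted barycenter of the scan-line index:
        //   yc = sum_{i,j} j * C(i,j) / sum_{i,j} C(i,j)
        // Multiplying by the scan-line pitch and centering on the probe
        // axis turns it into the angle tc of the most confident direction;
        // ttarget is the angle of the tracked target. The control law
        // below rotates the probe so that tc reaches ttarget.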
        plot.plot(0, 0, time, xtarget);
        plot.plot(1, 0, time, vpMath::deg(tc));
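
        // Visual servoing: the y-axis translation centers the target
        // laterally (-lambda_t * xtarget) and adds lambda_c * (tc - ttarget)
        // * ytarget so that the rotation does not make the target drift
        // sideways, while the rotation term (index 3) steers the scan
        // toward the confidence barycenter.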
        {
          vpMutex::vpScopedLock lock(s_mutex_capture);

          double lambda_t = 0.8;
          double lambda_c = 0.8;

          s_controlVelocity = 0.0;
          s_controlVelocity[1] = -lambda_t * xtarget + lambda_c * (tc - ttarget) * ytarget;
          s_controlVelocity[3] = lambda_c * (tc - ttarget);
        }
      }
      // Check if we need to initialize the display with the first frame
      if (!display_initialized_) {
// Initialize the display
#if defined(VISP_HAVE_X11)
        unsigned int xpos = 10;
        dpost_scan_ = new vpDisplayX(postScan_, xpos, 10, "post-scan");
        xpos += 300;
        dpost_scan_confidence_ = new vpDisplayX(confidencePostScan_, xpos, 10, "post-scan confidence");
        display_initialized_ = true;
#endif
      }

      // Display the image
      vpDisplay::display(postScan_);

      if (rectangleRoiDefined) {
        vpDisplay::displayLine(postScan_, static_cast<int>(rectangle.getTopLeft().get_i()),
                               static_cast<int>(rectangle.getTopLeft().get_j()),
                               static_cast<int>(rectangle.getBottomLeft().get_i()),
                               static_cast<int>(rectangle.getBottomLeft().get_j()), vpColor::red);
        vpDisplay::displayLine(postScan_, static_cast<int>(rectangle.getBottomLeft().get_i()),
                               static_cast<int>(rectangle.getBottomLeft().get_j()),
                               static_cast<int>(rectangle.getBottomRight().get_i()),
                               static_cast<int>(rectangle.getBottomRight().get_j()), vpColor::red);
        vpDisplay::displayLine(postScan_, static_cast<int>(rectangle.getBottomRight().get_i()),
                               static_cast<int>(rectangle.getBottomRight().get_j()),
                               static_cast<int>(rectangle.getTopRight().get_i()),
                               static_cast<int>(rectangle.getTopRight().get_j()), vpColor::red);
        vpDisplay::displayLine(postScan_, static_cast<int>(rectangle.getTopRight().get_i()),
                               static_cast<int>(rectangle.getTopRight().get_j()),
                               static_cast<int>(rectangle.getTopLeft().get_i()),
                               static_cast<int>(rectangle.getTopLeft().get_j()), vpColor::red);
      }

      // Display the confidence map
      vpDisplay::display(confidencePostScan_);

      // ROI definition with a mouse click
      vpDisplay::displayText(postScan_, 10, 10, "Click to define ROI...", vpColor::red);
      if (vpDisplay::getClick(postScan_, ROI_centralPoint, false)) {
        // update the rectangle only on the first click
        if (stateROI == ROI_waiting) {
          rectangle.setCenter(vpImagePoint(ROI_centralPoint.get_v(), ROI_centralPoint.get_u()));
          rectangle.setSize(80, 50);
          rectangle.setOrientation(0);
          rectangleRoiDefined = true;
        }
      }

      // Trigger the end of acquisition with a mouse click on the confidence map
      vpDisplay::displayText(confidencePostScan_, 10, 10, "Click to exit...", vpColor::red);
      if (vpDisplay::getClick(confidencePostScan_, false)) {
        vpMutex::vpScopedLock lock(s_mutex_capture);
        s_capture_state = capture_stopped;
      }

      // Update the display
      vpDisplay::flush(postScan_);
      vpDisplay::flush(confidencePostScan_);
    } else {
      vpTime::wait(2); // Sleep 2 ms
    }
  } while (capture_state_ != capture_stopped);

#if defined(VISP_HAVE_X11)
  delete dpost_scan_;
  delete dpost_scan_confidence_;
#endif

  std::cout << "End of display thread" << std::endl;
  return 0;
}

vpThread::Return controlFunction(vpThread::Args args)
{
  (void)args;
  vpRobotViper850 robot;

  vpMatrix eJe; // robot jacobian

  // Transformation from the end-effector frame to the force/torque sensor.
  // Note that the end-effector frame is located on the lower part of the
  // male component of the tool changer.
  vpHomogeneousMatrix eMs;
  eMs[2][3] = -0.062; // tz = -6.2cm

  // Transformation from the force/torque sensor to the probe frame from
  // which we want to control the robot
  vpHomogeneousMatrix sMp;

  // Transformation from the force/torque sensor to the end-effector frame
  vpHomogeneousMatrix sMe;
  eMs.inverse(sMe);

  // Build the twist matrix that converts a velocity expressed in the
  // end-effector frame into the FT sensor frame
  vpVelocityTwistMatrix sVe;
  sVe.buildFrom(sMe);

  vpColVector sHs(6);      // force/torque sensor measures
  vpColVector sHs_star(6); // force/torque sensor desired values in sensor frame
  vpColVector pHp_star(6); // force/torque sensor desired values in probe frame
  vpColVector gHg(6);      // force/torque due to the gravity
  vpMatrix lambda(6, 6);
  // Position of the cog of the object attached after the sensor in the sensor frame
  vpTranslationVector stg;
  vpColVector sHs_bias(6); // force/torque sensor measures for bias

  // Cartesian velocities corresponding to the force/torque control in the
  // sensor frame
  vpColVector v_s(6);
  // Joint velocities corresponding to the force/torque control
  vpColVector q_dot(6);

  // Initialize the force gain
  lambda = 0;
  for (int i = 0; i < 3; i++)
    lambda[i][i] = 0.02 / 6;
  // Initialize the torque gain
  for (int i = 3; i < 6; i++)
    lambda[i][i] = 1. / 2;

  // Initialize the desired force/torque values
  pHp_star = 0;
  pHp_star[2] = 3; // Fz = 3N
  //
  // Case of the C65 US probe
  //
  // Set the probe frame control
  sMp[2][3] = 0.262; // tz = 26.2cm

  // Init the force/torque due to the gravity
  gHg[2] = -(0.696 + 0.476) * 9.81; // m*g

  // Position of the cog of the object attached after the sensor in the sensor frame
  stg[0] = 0;
  stg[1] = 0;
  stg[2] = 0.088; // tz = 88.4mm

  vpRotationMatrix sRp;
  sMp.extract(sRp);
  vpTranslationVector stp;
  sMp.extract(stp);

  vpHomogeneousMatrix eMp = eMs * sMp;
  vpVelocityTwistMatrix eVp(eMp);

  // Get the position of the end-effector in the reference frame
  vpColVector q;
  vpHomogeneousMatrix fMe;
  vpHomogeneousMatrix fMs;
  vpRotationMatrix sRf;
  robot.getPosition(vpRobot::ARTICULAR_FRAME, q);
  robot.get_fMe(q, fMe);
  // Compute the position of the sensor frame in the reference frame
  fMs = fMe * eMs;
  vpHomogeneousMatrix sMf;
  fMs.inverse(sMf);
  sMf.extract(sRf);

  // Build the transformation that converts the forces due to the gravity
  // into the sensor frame
  vpForceTwistMatrix sFg(sMf); // Only the rotation part is to consider
  // Modify the translational part
  for (int i = 0; i < 3; i++)
    for (int j = 0; j < 3; j++)
      sFg[i + 3][j] = (stg.skew() * sRf)[i][j];
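
  // sFg maps the gravity wrench gHg (known in a frame aligned with
  // gravity) into the sensor frame: the rotation comes from sRf, and the
  // torque rows add the lever arm of the tool's center of mass,
  // skew(stg) * sRf. This term is what lets the loop subtract the tool
  // weight from the raw force/torque measures below.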

  // Build the transformation that converts a force/torque expressed in the
  // probe frame into the sensor frame
  vpForceTwistMatrix sFp(sMp);

  // Bias the force/torque sensor
  std::cout << "\nBias the force/torque sensor...\n " << std::endl;
  robot.biasForceTorqueSensor();

  // Set the robot to velocity control
  robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);

  int iter = 0;
  t_CaptureState capture_state_;
  t_stateInitROI ROI_state = ROI_waiting;

  std::cout << "Waiting for ROI definition to start the control loop..." << std::endl;
  while (ROI_state == ROI_waiting) {
    vpMutex::vpScopedLock lock(s_mutex_ROI_state);
    ROI_state = s_ROI_state;

    // Case of end of capture before the ROI definition
    s_mutex_capture.lock();
    capture_state_ = s_capture_state;
    s_mutex_capture.unlock();
    if (capture_state_ == capture_stopped) {
      std::cout << "End of control thread" << std::endl;
      return 0;
    }
  }

  std::cout << "Starting control loop..." << std::endl;
  do {
    s_mutex_capture.lock();
    capture_state_ = s_capture_state;
    s_mutex_capture.unlock();

    // Check if a frame is available
    if (capture_state_ == capture_started) {

      // Get the force/torque measures from the sensor
      sHs = robot.getForceTorque();

      // Multiply the measures by -1 to get the force/torque
      // exerted by the robot on the environment.
      sHs *= -1;

      // Update the gravity transformation matrix
      robot.getPosition(vpRobot::ARTICULAR_FRAME, q);
      robot.get_fMe(q, fMe);
      // Compute the position of the sensor frame in the reference frame
      fMs = fMe * eMs;
      // Compute the inverse transformation
      fMs.inverse(sMf);
      sMf.extract(sRf);
      // Update the transformation that converts the forces due to the
      // gravity into the sensor frame
      sFg.buildFrom(sMf); // Only the rotation part is to consider
      // Modify the translational part
      for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
          sFg[i + 3][j] = (stg.skew() * sRf)[i][j];

      if (iter == 0) {
        sHs_bias = sHs - sFg * gHg;
      }

      // Compute the rotation in the probe frame from the control velocity
      // deduced from the confidence map barycenter
      vpColVector v_p;
      {
        vpMutex::vpScopedLock lock(s_mutex_capture);
        v_p = s_controlVelocity;
      }

      v_p[0] = 0;
      v_p[2] = 0;
      v_p[4] = 0;
      v_p[5] = 0;

      // Compute the force/torque control law in the sensor frame
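      // The law v_s = lambda * (sFp * pHp_star - (sHs - sFg * gHg - sHs_bias))
      // drives the measured contact force, compensated for gravity (sFg * gHg)
      // and for the initial sensor bias (sHs_bias), toward the desired wrench
      // pHp_star (3 N along the probe axis) expressed in the sensor frame.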
      v_s = lambda * (sFp * pHp_star - (sHs - sFg * gHg - sHs_bias));

      v_s[0] = 0.0;
      v_s[1] = 0.0;
      v_s[3] = 0.0;
      v_s[4] = 0.0;
      v_s[5] = 0.0;

      vpVelocityTwistMatrix eVs;
      sVe.inverse(eVs);
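
      // Only the force component along the sensor z-axis is kept in v_s;
      // the other axes are handled by the confidence/target servo in v_p.
      // Both are mapped to the end-effector frame with the twist matrices
      // eVs and eVp before being summed.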
      vpColVector v_e = eVs * v_s + eVp * v_p;

      std::cout << v_e.t() << std::endl;

      // Get the robot jacobian eJe
      robot.get_eJe(eJe);

      // Compute the joint velocities to achieve the force/torque control
      q_dot = eJe.pseudoInverse() * v_e;

      // Send the joint velocities to the robot
      robot.setVelocity(vpRobot::ARTICULAR_FRAME, q_dot);

      iter++;
    }
    vpTime::wait(1);
  } while (capture_state_ != capture_stopped);

  std::cout << "End of control thread" << std::endl;
  return 0;
}

int main(int argc, const char *argv[])
{
  unsigned int opt_input = 1; // Default value is 1 to match the material in the lab

  // Command line options
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--input" && i + 1 < argc)
      opt_input = (unsigned int)atoi(argv[i + 1]);
    else if (std::string(argv[i]) == "--help") {
      std::cout << "Usage: " << argv[0] << " [--input <number>] [--help]" << std::endl;
      return 0;
    }
  }

  // Instantiate the grabber
  vpV4l2Grabber g;

  g.setInput(opt_input);
  g.setScale(1);

  // init
  s_robotIsRunning = false;
  s_controlVelocity = vpColVector(6, 0);

  // Start the threads
  vpThread thread_capture((vpThread::Fn)captureFunction, (vpThread::Args)&g);
  vpThread thread_display((vpThread::Fn)displayFunction);
  vpThread thread_control((vpThread::Fn)controlFunction);

  // Wait until the threads end
  thread_capture.join();
  thread_display.join();
  thread_control.join();

  return 0;
}

#else
int main()
{
#ifndef VISP_HAVE_V4L2
  std::cout << "You should enable V4L2 to make this example work..." << std::endl;
#elif !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) // UNIX
  std::cout << "You should enable pthread usage and rebuild ViSP..." << std::endl;
#else
  std::cout << "Multi-threading seems not supported on this platform" << std::endl;
#endif
#ifndef VISP_HAVE_VIPER850
  std::cout << "You need a Viper 850 robot to run this example" << std::endl;
#endif
}

#endif