/*
 * UsTK : Ultrasound ToolKit - version 2.0.1 under development (2025-01-22)
 *
 * tutorial-sonosite-confidence-control.cpp
 */
#include <iostream>

#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpMutex.h>
#include <visp3/core/vpThread.h>
#include <visp3/core/vpTime.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpPlot.h>
#include <visp3/robot/vpRobotViper850.h>
#include <visp3/sensor/vpV4l2Grabber.h>

#include <visp3/ustk_confidence_map/usScanlineConfidence2D.h>
#include <visp3/ustk_core/usImagePostScan2D.h>
#include <visp3/ustk_core/usImagePreScan2D.h>
#include <visp3/ustk_core/usPostScanToPreScan2DConverter.h>
#include <visp3/ustk_core/usPreScanToPostScan2DConverter.h>
#if defined(VISP_HAVE_V4L2) && defined(VISP_HAVE_PTHREAD) && defined(VISP_HAVE_VIPER850)

22 // Shared vars
23 typedef enum { capture_waiting, capture_started, capture_stopped } t_CaptureState;
24 t_CaptureState s_capture_state = capture_waiting;
25 vpImage<unsigned char> s_frame;
26 vpMutex s_mutex_capture;
27 
28 bool s_robotIsRunning;
29 vpMutex s_mutex_control_velocity;
30 vpColVector s_controlVelocity(6, 0.);
31 
33 
35 vpThread::Return captureFunction(vpThread::Args args)
36 {
37  vpV4l2Grabber cap = *((vpV4l2Grabber *)args);
38  vpImage<unsigned char> frame_;
39  bool stop_capture_ = false;
40 
41  cap.open(frame_);
42 
43  vpRect roi(vpImagePoint(55, 70), vpImagePoint(410, 555)); // roi to remove sonosite banners
44 
45  while (!stop_capture_) {
46  // Capture in progress
47  cap.acquire(frame_, roi); // get a new frame from camera
48 
49  // Update shared data
50  {
51  vpMutex::vpScopedLock lock(s_mutex_capture);
52  if (s_capture_state == capture_stopped)
53  stop_capture_ = true;
54  else
55  s_capture_state = capture_started;
56  s_frame = frame_;
57  }
58  }
59 
60  {
61  vpMutex::vpScopedLock lock(s_mutex_capture);
62  s_capture_state = capture_stopped;
63  }
64  std::cout << "End of capture thread" << std::endl;
65  return 0;
66 }
68 
70 vpThread::Return displayFunction(vpThread::Args args)
71 {
72  (void)args; // Avoid warning: unused parameter args
73  vpImage<unsigned char> I_;
76  usImagePostScan2D<unsigned char> confidencePostScan_;
77  usImagePreScan2D<unsigned char> confidencePreScan_;
78  usScanlineConfidence2D confidenceMapProcessor_;
79  usPostScanToPreScan2DConverter backConverter_;
81  usTransducerSettings transducerSettings;
82 
83  transducerSettings.setTransducerRadius(0.060);
84  transducerSettings.setTransducerConvexity(true);
85  transducerSettings.setScanLineNumber(128);
86  transducerSettings.setFieldOfView(vpMath::rad(57.0)); // field of view is 57 deg
87  transducerSettings.setDepth(0.12);
88 
89  t_CaptureState capture_state_;
90  bool display_initialized_ = false;
91 
92 #if defined(VISP_HAVE_X11)
93  vpDisplayX *dpost_scan_ = NULL; // display post-scan image
94  vpDisplayX *dpre_scan_ = NULL; // display pre-scan image
95  vpDisplayX *dpost_scan_confidence_ = NULL; // display post-scan image
96  vpDisplayX *dpre_scan_confidence_ = NULL; // display pre-scan image
97 
98 #endif
99 
100  vpPlot plot(1);
101  plot.initGraph(0, 1);
102  plot.initRange(0, 0, 10, -10, 10);
103  plot.setTitle(0, "confidence barycenter error");
104  plot.setUnitY(0, "error(degrees)");
105  plot.setLegend(0, 0, "time");
106 
107  double startTime = vpTime::measureTimeMs();
108 
109  do {
110  s_mutex_capture.lock();
111  capture_state_ = s_capture_state;
112  s_mutex_capture.unlock();
113 
114  // Check if a frame is available
115  if (capture_state_ == capture_started) {
116  // Create a copy of the captured frame
117  {
118  vpMutex::vpScopedLock lock(s_mutex_capture);
119  I_ = s_frame;
120  }
121 
122  // Convert image into post-scan image
123  postScan_.setData(I_);
124  postScan_.setProbeName("Sonosite C60");
125  postScan_.setTransducerRadius(0.060);
126  postScan_.setTransducerConvexity(true);
127  postScan_.setScanLineNumber(128);
128  postScan_.setFieldOfView(vpMath::rad(57.0)); // field of view is 57 deg for sonosite
129  postScan_.setDepth(0.12);
130 
131  // pixel size
132  postScan_.setHeightResolution((postScan_.getTransducerRadius() + postScan_.getDepth() -
133  (postScan_.getTransducerRadius() * cos(postScan_.getFieldOfView() / 2.0))) /
134  postScan_.getHeight());
135 
136  postScan_.setWidthResolution(
137  (2.0 * (postScan_.getTransducerRadius() + postScan_.getDepth()) * sin(postScan_.getFieldOfView() / 2.0)) /
138  postScan_.getWidth());
139 
140  backConverter_.convert(postScan_, preScan_, 480);
141  // Compute confidence map on pre-scan image
142  confidenceMapProcessor_.run(confidencePreScan_, preScan_);
143 
144  // converting computed confidence map in post-scan
145  converter_.convert(confidencePreScan_, confidencePostScan_);
146 
147  unsigned int height(confidencePreScan_.getHeight()), width(confidencePreScan_.getWidth());
148 
149  double I_sum = 0.0;
150  for (unsigned int i = 0; i < height; ++i)
151  for (unsigned int j = 0; j < width; ++j)
152  I_sum += static_cast<double>(confidencePreScan_[i][j]);
153 
154  double yc = 0.0;
155  for (unsigned int x = 0; x < height; ++x)
156  for (unsigned int y = 0; y < width; ++y) {
157  yc += y * confidencePreScan_[x][y];
158  }
159  yc /= I_sum;
160 
161  double tc = yc * confidencePreScan_.getScanLinePitch() - confidencePreScan_.getFieldOfView() / 2.0;
162 
163  double time = (vpTime::measureTimeMs() - startTime) / 1000.0;
164 
165  plot.plot(0, 0, time, vpMath::deg(tc));
166 
167  {
168  vpMutex::vpScopedLock lock(s_mutex_capture);
169  s_controlVelocity = 0.0;
170  s_controlVelocity[3] = 0.5 * tc;
171  }
172  // Check if we need to initialize the display with the first frame
173  if (!display_initialized_) {
174 // Initialize the display
175 #if defined(VISP_HAVE_X11)
176  unsigned int xpos = 10;
177  dpost_scan_ = new vpDisplayX(postScan_, xpos, 10, "post-scan");
178  xpos += 80 + postScan_.getWidth();
179  dpre_scan_ = new vpDisplayX(preScan_, xpos, 10, "pre-scan");
180  xpos += 40 + preScan_.getWidth();
181  dpre_scan_confidence_ = new vpDisplayX(confidencePreScan_, xpos, 10, "pre-scan confidence");
182  xpos += 40 + confidencePreScan_.getWidth();
183  dpost_scan_confidence_ = new vpDisplayX(confidencePostScan_, xpos, 10, "post-scan confidence");
184  display_initialized_ = true;
185 #endif
186  }
187 
188  // Display the image
189  vpDisplay::display(postScan_);
190  vpDisplay::display(preScan_);
191  vpDisplay::display(confidencePreScan_);
192  vpDisplay::display(confidencePostScan_);
193 
194  // Trigger end of acquisition with a mouse click
195  vpDisplay::displayText(postScan_, 10, 10, "Click to exit...", vpColor::red);
196  if (vpDisplay::getClick(postScan_, false)) {
197  vpMutex::vpScopedLock lock(s_mutex_capture);
198  s_capture_state = capture_stopped;
199  }
200  vpDisplay::displayText(preScan_, 10, 10, "Click to exit...", vpColor::red);
201  if (vpDisplay::getClick(preScan_, false)) {
202  vpMutex::vpScopedLock lock(s_mutex_capture);
203  s_capture_state = capture_stopped;
204  }
205  vpDisplay::displayText(confidencePreScan_, 10, 10, "Click to exit...", vpColor::red);
206  if (vpDisplay::getClick(confidencePreScan_, false)) {
207  vpMutex::vpScopedLock lock(s_mutex_capture);
208  s_capture_state = capture_stopped;
209  }
210  vpDisplay::displayText(confidencePostScan_, 10, 10, "Click to exit...", vpColor::red);
211  if (vpDisplay::getClick(confidencePostScan_, false)) {
212  vpMutex::vpScopedLock lock(s_mutex_capture);
213  s_capture_state = capture_stopped;
214  }
215 
216  // Update the display
217  vpDisplay::flush(postScan_);
218  vpDisplay::flush(preScan_);
219  vpDisplay::flush(confidencePostScan_);
220  vpDisplay::flush(confidencePreScan_);
221  } else {
222  vpTime::wait(2); // Sleep 2ms
223  }
224  } while (capture_state_ != capture_stopped);
225 
226 #if defined(VISP_HAVE_X11)
227  delete dpost_scan_;
228  delete dpre_scan_;
229  delete dpre_scan_confidence_;
230  delete dpost_scan_confidence_;
231 #endif
232 
233  std::cout << "End of display thread" << std::endl;
234  return 0;
235 }
237 
238 vpThread::Return controlFunction(vpThread::Args args)
239 {
240  (void)args;
241  vpRobotViper850 robot;
242 
243  vpMatrix eJe; // robot jacobian
244 
245  // Transformation from end-effector frame to the force/torque sensor
246  // Note that the end-effector frame is located on the lower part of
247  // male component of the tool changer.
248  vpHomogeneousMatrix eMs;
249  eMs[2][3] = -0.062; // tz = -6.2cm
250 
251  // Transformation from force/torque sensor to the probe frame from where
252  // we want to control the robot
253  vpHomogeneousMatrix sMp;
254 
255  // Transformation from force/torque sensor to the end-effector frame
256  vpHomogeneousMatrix sMe;
257  eMs.inverse(sMe);
258 
259  // Build the transformation that allows to convert a velocity in the
260  // end-effector frame to the FT sensor frame
261  vpVelocityTwistMatrix sVe;
262  sVe.buildFrom(sMe);
263 
264  vpColVector sHs(6); // force/torque sensor measures
265  vpColVector sHs_star(6); // force/torque sensor desired values in sensor frame
266  vpColVector pHp_star(6); // force/torque sensor desired values in probe frame
267  vpColVector gHg(6); // force/torque due to the gravity
268  vpMatrix lambda(6, 6);
269  // Position of the cog of the object attached after the sensor in the sensor frame
270  vpTranslationVector stg;
271  vpColVector sHs_bias(6); // force/torque sensor measures for bias
272 
273  // Cartesian velocities corresponding to the force/torque control in the
274  // sensor frame
275  vpColVector v_s(6);
276  // Joint velocities corresponding to the force/torque control
277  vpColVector q_dot(6);
278 
279  // Initialized the force gain
280  lambda = 0;
281  for (int i = 0; i < 3; i++)
282  lambda[i][i] = 0.02 / 6;
283  // Initialized the torque gain
284  for (int i = 3; i < 6; i++)
285  lambda[i][i] = 1. / 2;
286 
287  // Initialize the desired force/torque values
288  pHp_star = 0;
289  pHp_star[2] = 3; // Fz = 3N
290  //
291  // Case of the C65 US probe
292  //
293  // Set the probe frame control
294  sMp[2][3] = 0.262; // tz = 26.2cm
295 
296  // Init the force/torque due to the gravity
297  gHg[2] = -(0.696 + 0.476) * 9.81; // m*g
298 
299  // Position of the cog of the object attached after the sensor in the sensor frame
300  stg[0] = 0;
301  stg[1] = 0;
302  stg[2] = 0.088; // tz = 88.4mm
303 
304  vpRotationMatrix sRp;
305  sMp.extract(sRp);
306  vpTranslationVector stp;
307  sMp.extract(stp);
308 
309  vpHomogeneousMatrix eMp = eMs * sMp;
310  vpVelocityTwistMatrix eVp(eMp);
311 
312  // Get the position of the end-effector in the reference frame
313  vpColVector q;
314  vpHomogeneousMatrix fMe;
315  vpHomogeneousMatrix fMs;
316  vpRotationMatrix sRf;
317  robot.getPosition(vpRobot::ARTICULAR_FRAME, q);
318  robot.get_fMe(q, fMe);
319  // Compute the position of the sensor frame in the reference frame
320  fMs = fMe * eMs;
321  vpHomogeneousMatrix sMf;
322  fMs.inverse(sMf);
323  sMf.extract(sRf);
324 
325  // Build the transformation that allows to convert the forces due to the
326  // gravity in the sensor frame
327  vpForceTwistMatrix sFg(sMf); // Only the rotation part is to consider
328  // Modify the translational part
329  for (int i = 0; i < 3; i++)
330  for (int j = 0; j < 3; j++)
331  sFg[i + 3][j] = (stg.skew() * sRf)[i][j];
332 
333  // Build the transformation that allows to convert a FT expressed in the
334  // FT probe frame into the sensor frame
335  vpForceTwistMatrix sFp(sMp);
336 
337  // Bias the force/torque sensor
338  std::cout << "\nBias the force/torque sensor...\n " << std::endl;
339  robot.biasForceTorqueSensor();
340 
341  // Set the robot to velocity control
342  robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
343 
344  int iter = 0;
345  t_CaptureState capture_state_;
346 
347  std::cout << "Starting control loop..." << std::endl;
348  do {
349  s_mutex_capture.lock();
350  capture_state_ = s_capture_state;
351  s_mutex_capture.unlock();
352 
353  // Check if a frame is available
354  if (capture_state_ == capture_started) {
355 
356  // Get the force/torque measures from the sensor
357  sHs = robot.getForceTorque();
358 
359  // Multiply the measures by -1 to get the force/torque exerced by the
360  // robot to the environment.
361  sHs *= -1;
362 
363  // Update the gravity transformation matrix
364  robot.getPosition(vpRobot::ARTICULAR_FRAME, q);
365  robot.get_fMe(q, fMe);
366  // Compute the position of the sensor frame in the reference frame
367  fMs = fMe * eMs;
368  // Compute the inverse transformation
369  fMs.inverse(sMf);
370  sMf.extract(sRf);
371  // Update the transformation that allows to convert the forces due to the
372  // gravity in the sensor frame
373  sFg.buildFrom(sMf); // Only the rotation part is to consider
374  // Modify the translational part
375  for (int i = 0; i < 3; i++)
376  for (int j = 0; j < 3; j++)
377  sFg[i + 3][j] = (stg.skew() * sRf)[i][j];
378 
379  if (iter == 0) {
380  sHs_bias = sHs - sFg * gHg;
381  }
382 
383  // Compute rotation in probe frame from control velocity deduced of the confidence map barycenter
384  vpColVector v_p;
385  {
386  vpMutex::vpScopedLock lock(s_mutex_capture);
387  v_p = s_controlVelocity;
388  }
389 
390  v_p[0] = 0;
391  v_p[1] = 0;
392  v_p[2] = 0;
393  v_p[4] = 0;
394  v_p[5] = 0;
395 
396  // Compute the force/torque control law in the sensor frame
397  v_s = lambda * (sFp * pHp_star - (sHs - sFg * gHg - sHs_bias));
398 
399  v_s[0] = 0.0;
400  v_s[1] = 0.0;
401  v_s[3] = 0.0;
402  v_s[4] = 0.0;
403  v_s[5] = 0.0;
404 
405  vpVelocityTwistMatrix eVs;
406  sVe.inverse(eVs);
407 
408  vpColVector v_e = eVs * v_s + eVp * v_p;
409 
410  // Get the robot jacobian eJe
411  robot.get_eJe(eJe);
412 
413  // Compute the joint velocities to achieve the force/torque control
414  q_dot = eJe.pseudoInverse() * v_e;
415 
416  // Send the joint velocities to the robot
417  robot.setVelocity(vpRobot::ARTICULAR_FRAME, q_dot);
418 
419  iter++;
420  }
421  vpTime::wait(1); // 5
422  } while (capture_state_ != capture_stopped);
423 
424  std::cout << "End of control thread" << std::endl;
425  return 0;
426 }
427 
429 int main(int argc, const char *argv[])
430 {
431  unsigned int opt_input = 1; // Default value is 1 to mach the material in the lab
432 
433  // Command line options
434  for (int i = 0; i < argc; i++) {
435  if (std::string(argv[i]) == "--input")
436  opt_input = (unsigned int)atoi(argv[i + 1]);
437  else if (std::string(argv[i]) == "--help") {
438  std::cout << "Usage: " << argv[0] << " [--input <number>] [--help]" << std::endl;
439  return 0;
440  }
441  }
442 
443  // Instantiate the grabber
444  vpV4l2Grabber g;
445 
446  g.setInput(opt_input);
447  g.setScale(1);
448 
449  // init
450  s_robotIsRunning = false;
451  s_controlVelocity = vpColVector(6, 0);
452 
453  // Start the threads
454  vpThread thread_capture((vpThread::Fn)captureFunction, (vpThread::Args)&g);
455  vpThread thread_display((vpThread::Fn)displayFunction);
456  vpThread thread_control((vpThread::Fn)controlFunction);
457 
458  // Wait until thread ends up
459  thread_capture.join();
460  thread_display.join();
461  thread_control.join();
462 
463  return 0;
464 }
466 
#else
//! Fallback entry point compiled when V4L2, pthread or the Viper 850 SDK
//! is missing: explains which prerequisite is not satisfied.
int main()
{
#ifndef VISP_HAVE_V4L2
  std::cout << "You should enable V4L2 to make this example working..." << std::endl;
#elif !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) // UNIX
  std::cout << "You should enable pthread usage and rebuild ViSP..." << std::endl;
#else
  std::cout << "Multi-threading seems not supported on this platform" << std::endl;
#endif
#ifndef VISP_HAVE_VIPER850
  std::cout << "You need viper 850 robot to run this example" << std::endl;
#endif
  return 0;
}

#endif
/* Doxygen cross-references (extraction residue, kept for reference) for the
 * UsTK / ViSP API used above:
 *   void setHeightResolution(double heightResolution)
 *   void setData(const vpImage< Type > &image)
 *   void setWidthResolution(double widthResolution)
 *   void convert(const usImagePostScan2D< unsigned char > &imageToConvert, usImagePreScan2D< unsigned char > &imageConverted, int preScanSamples)
 *   void convert(const usImagePreScan2D< unsigned char > &preScanImage, usImagePostScan2D< unsigned char > &postScanImage, double xResolution=0., double yResolution=0.)
 *   Process a pre-scan image to determine the confidence map.
 *   void run(usImagePreScan2D< unsigned char > &preScanConfidence, const usImagePreScan2D< unsigned char > &preScanImage)
 *   Generic class for 2D ultrasound data common settings associated to the type of probe transducer used.
 *   void setFieldOfView(double fieldOfView)
 *   void setTransducerConvexity(const bool isTransducerConvex)
 *   void setDepth(double depth)
 *   void setProbeName(std::string probeName)
 *   void setTransducerRadius(const double transducerRadius)
 *   double getTransducerRadius() const
 *   void setScanLineNumber(unsigned int scanLineNumber)
 */