UsTK : Ultrasound ToolKit  version 2.0.1 under development (2023-12-07)
usNetworkGrabberPostScan2D.cpp
/****************************************************************************
 *
 * This file is part of the ustk software.
 * Copyright (C) 2016 - 2017 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ustk with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at ustk@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Authors:
 * Pedro Patlan
 * Marc Pouliquen
 *
 *****************************************************************************/

#include <visp3/ustk_grabber/usNetworkGrabberPostScan2D.h>

#if defined(USTK_HAVE_QT5) || defined(USTK_HAVE_VTK_QT)

#include <QtCore/QDataStream>
#include <QtCore/QEventLoop>

usNetworkGrabberPostScan2D::usNetworkGrabberPostScan2D(usNetworkGrabber *parent) : usNetworkGrabber(parent)
{
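  // triple buffer: CURRENT_FILLED_FRAME_POSITION_IN_VEC is being written from the network,
  // MOST_RECENT_FRAME_POSITION_IN_VEC holds the last complete frame received,
  // OUTPUT_FRAME_POSITION_IN_VEC is the frame handed to the user by acquire()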
  // buffer of size 3
  m_outputBuffer.push_back(new usFrameGrabbedInfo<usImagePostScan2D<unsigned char> >);
  m_outputBuffer.push_back(new usFrameGrabbedInfo<usImagePostScan2D<unsigned char> >);
  m_outputBuffer.push_back(new usFrameGrabbedInfo<usImagePostScan2D<unsigned char> >);

  m_firstFrameAvailable = false;

  m_recordingOn = false;
  m_firstImageTimestamp = 0;

  connect(m_tcpSocket, SIGNAL(readyRead()), this, SLOT(dataArrived()));
}

usNetworkGrabberPostScan2D::~usNetworkGrabberPostScan2D() { }

void usNetworkGrabberPostScan2D::dataArrived()
{
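  // This slot is called each time data is available on the socket. It either parses an init
  // confirmation header, parses an image header followed by the pixel data, or appends the
  // remaining bytes of a frame that was split across several TCP packets.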
  QDataStream in;
  in.setDevice(m_tcpSocket);
#if (defined(USTK_HAVE_QT5) || defined(USTK_HAVE_VTK_QT5))
  in.setVersion(QDataStream::Qt_5_0);
#elif defined(USTK_HAVE_VTK_QT4)
  in.setVersion(QDataStream::Qt_4_8);
#else
  throw(vpException(vpException::fatalError, "your Qt version is not managed in ustk"));
#endif

  int headerType;
  if (m_bytesLeftToRead == 0) { // do not try to read a header if last frame was not complete
    in >> headerType;
    if (m_verbose)
      std::cout << "header received, type = " << headerType << std::endl;
  } else {
    headerType = 0; // not a header received, but a part of a frame
  }
  // init confirm header received
  if (headerType == m_confirmHeader.headerId) {
    // read whole header
    in >> m_confirmHeader.initOk;
    in >> m_confirmHeader.probeId;

    if (m_confirmHeader.initOk == 0) {
      m_tcpSocket->close();
      throw(vpException(vpException::fatalError, "porta initialisation error, closing connection."));
    }
    if (m_verbose)
      std::cout << "porta init success, detected probe id = " << m_confirmHeader.probeId << std::endl;

    // read all acquisition parameters received
    readAcquisitionParameters(in);

    emit(serverUpdateEnded(m_confirmHeader.initOk));
  }

  // image header received
  else if (headerType == m_imageHeader.headerId) {
    // read whole header
    in >> m_imageHeader.frameCount;
    quint64 timestamp;
    in >> timestamp;
    m_imageHeader.timeStamp = timestamp;

    if (m_imageHeader.frameCount == 0) // used to save the sequence
      m_firstImageTimestamp = timestamp;

    in >> m_imageHeader.dataRate;
    in >> m_imageHeader.dataLength;
    in >> m_imageHeader.ss;
    in >> m_imageHeader.imageType;
    in >> m_imageHeader.frameWidth;
    in >> m_imageHeader.frameHeight;
    in >> m_imageHeader.pixelWidth;
    in >> m_imageHeader.pixelHeight;
    in >> m_imageHeader.transmitFrequency;
    in >> m_imageHeader.samplingFrequency;
    in >> m_imageHeader.transducerRadius;
    in >> m_imageHeader.scanLinePitch;
    in >> m_imageHeader.scanLineNumber;
    in >> m_imageHeader.imageDepth;
    in >> m_imageHeader.anglePerFr;
    in >> m_imageHeader.framesPerVolume;
    in >> m_imageHeader.motorRadius;
    in >> m_imageHeader.motorType;

    if (m_verbose) {
      std::cout << "frameCount = " << m_imageHeader.frameCount << std::endl;
      std::cout << "timeStamp = " << m_imageHeader.timeStamp << std::endl;
      std::cout << "dataRate = " << m_imageHeader.dataRate << std::endl;
      std::cout << "dataLength = " << m_imageHeader.dataLength << std::endl;
      std::cout << "ss = " << m_imageHeader.ss << std::endl;
      std::cout << "imageType = " << m_imageHeader.imageType << std::endl;
      std::cout << "frameWidth = " << m_imageHeader.frameWidth << std::endl;
      std::cout << "frameHeight = " << m_imageHeader.frameHeight << std::endl;
      std::cout << "pixelWidth = " << m_imageHeader.pixelWidth << std::endl;
      std::cout << "pixelHeight = " << m_imageHeader.pixelHeight << std::endl;
      std::cout << "transmitFrequency = " << m_imageHeader.transmitFrequency << std::endl;
      std::cout << "samplingFrequency = " << m_imageHeader.samplingFrequency << std::endl;
      std::cout << "transducerRadius = " << m_imageHeader.transducerRadius << std::endl;
      std::cout << "scanLinePitch = " << m_imageHeader.scanLinePitch << std::endl;
      std::cout << "scanLineNumber = " << m_imageHeader.scanLineNumber << std::endl;
      std::cout << "imageDepth = " << m_imageHeader.imageDepth << std::endl;
      std::cout << "anglePerFr = " << m_imageHeader.anglePerFr << std::endl;
      std::cout << "framesPerVolume = " << m_imageHeader.framesPerVolume << std::endl;
      std::cout << "motorRadius = " << m_imageHeader.motorRadius << std::endl;
      std::cout << "motorType = " << m_imageHeader.motorType << std::endl;
    }

    // update transducer settings with image header received
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTransducerRadius(m_imageHeader.transducerRadius);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setScanLinePitch(m_imageHeader.scanLinePitch);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setScanLineNumber(m_imageHeader.scanLineNumber);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setDepth(m_imageHeader.imageDepth / 1000.0);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)
        ->setTransducerConvexity(m_imageHeader.transducerRadius != 0.);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTransmitFrequency(m_imageHeader.transmitFrequency);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setSamplingFrequency(m_imageHeader.samplingFrequency);

    // set data info
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setFrameCount(m_imageHeader.frameCount);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setFramesPerVolume(m_imageHeader.framesPerVolume);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTimeStamp(m_imageHeader.timeStamp);

    // warning if timestamps are close (< 1 ms)
    if (m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getTimeStamp() -
            m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getTimeStamp() <
        1) {
      std::cout << "WARNING : new image received with an acquisition timestamp close to previous image" << std::endl;
    }

    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)
        ->resize(m_imageHeader.frameHeight, m_imageHeader.frameWidth);

    // pixel size
    if (m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getTransducerRadius() > 0) { // convex probe
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setWidthResolution(m_imageHeader.pixelWidth);

      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setHeightResolution(m_imageHeader.pixelHeight);
    } else { // linear probe
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)
          ->setWidthResolution(m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getScanLinePitch() /
                               m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getWidth());

      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)
          ->setHeightResolution(m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getDepth() /
                                m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getHeight());
    }
    // read image content
    m_bytesLeftToRead = m_imageHeader.dataLength;

    m_bytesLeftToRead -= in.readRawData((char *)m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->bitmap,
                                        m_imageHeader.dataLength);

    if (m_bytesLeftToRead == 0) { // we've read the whole frame in 1 packet.
      // Now CURRENT_FILLED_FRAME_POSITION_IN_VEC has become the last frame received
      // So we switch pointers between MOST_RECENT_FRAME_POSITION_IN_VEC and CURRENT_FILLED_FRAME_POSITION_IN_VEC
      {
        usFrameGrabbedInfo<usImagePostScan2D<unsigned char> > *savePtr =
            m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC);
        m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC) = m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC);
        m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC) = savePtr;
        if (m_recordingOn)
          m_sequenceWriter.write(*m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC),
                                 m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getTimeStamp() -
                                     m_firstImageTimestamp);
      }
      m_firstFrameAvailable = true;
      emit(newFrameAvailable());
      emit(newFrame(*(m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC))));
    }
    if (m_verbose)
      std::cout << "Bytes left to read for whole frame = " << m_bytesLeftToRead << std::endl;

  }

  // we have a part of the image still not read (arrived with next tcp packet)
  else {
    if (m_verbose) {
      std::cout << "reading following part of the frame, left to read = " << m_bytesLeftToRead << std::endl;
      std::cout << "local image size = " << m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getSize()
                << std::endl;
    }
    m_bytesLeftToRead -=
        in.readRawData((char *)m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->bitmap +
                           (m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getSize() - m_bytesLeftToRead),
                       m_bytesLeftToRead);

    if (m_bytesLeftToRead == 0) { // we've read the last part of the frame.
      // Now CURRENT_FILLED_FRAME_POSITION_IN_VEC has become the last frame received
      // So we switch pointers between MOST_RECENT_FRAME_POSITION_IN_VEC and CURRENT_FILLED_FRAME_POSITION_IN_VEC
      usFrameGrabbedInfo<usImagePostScan2D<unsigned char> > *savePtr =
          m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC);
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC) = m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC);
      m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC) = savePtr;

      if (m_recordingOn)
        m_sequenceWriter.write(*m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC),
                               m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getTimeStamp() -
                                   m_firstImageTimestamp);

      m_firstFrameAvailable = true;
      emit(newFrameAvailable());
      emit(newFrame(*(m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC))));
    }
  }
}

usFrameGrabbedInfo<usImagePostScan2D<unsigned char> > *usNetworkGrabberPostScan2D::acquire()
{
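  // Blocks the caller in a local event loop until dataArrived() signals that a frame newer than
  // the one currently held in OUTPUT_FRAME_POSITION_IN_VEC is available, then swaps it in.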
  // manage first frame or if user grabs too fast
  if (!m_firstFrameAvailable ||
      m_outputBuffer.at(OUTPUT_FRAME_POSITION_IN_VEC)->getFrameCount() >=
          m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getFrameCount()) {
    // we wait until a new frame is available
    QEventLoop loop;
    loop.connect(this, SIGNAL(newFrameAvailable()), SLOT(quit()));
    loop.exec();
  }
  // switch pointers
  usFrameGrabbedInfo<usImagePostScan2D<unsigned char> > *savePtr = m_outputBuffer.at(OUTPUT_FRAME_POSITION_IN_VEC);
  m_outputBuffer.at(OUTPUT_FRAME_POSITION_IN_VEC) = m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC);
  m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC) = savePtr;

  return m_outputBuffer.at(OUTPUT_FRAME_POSITION_IN_VEC);
}

{
  for (unsigned int i = 0; i < m_outputBuffer.size(); i++)
    m_outputBuffer.at(i)->display = display;
}

void usNetworkGrabberPostScan2D::activateRecording(std::string path)
{
  m_recordingOn = true;
  m_sequenceWriter.setSequenceDirectory(path);
}

void usNetworkGrabberPostScan2D::stopRecording() { m_recordingOn = false; }

#endif
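
For reference, the sketch below shows how a client typically drives this grabber, following the pattern of the ustk grabber tutorials. Only acquire(), activateRecording() and stopRecording() are defined in the file above; connectToServer(), initAcquisition(), runAcquisition() and the usInitHeaderSent structure (with probeId, slotId and imagingMode fields) are assumed to come from the usNetworkGrabber base class, so the exact names should be checked against the installed headers. This is a minimal sketch, not the tutorial code itself.

#include <iostream>

#include <QApplication>

#include <visp3/ustk_grabber/usNetworkGrabberPostScan2D.h>

int main(int argc, char **argv)
{
  QApplication app(argc, argv); // a Qt application context is needed for the socket signals/slots

  usNetworkGrabberPostScan2D grabber;
  grabber.connectToServer(); // assumption: base-class call, the server listens on port 8080

  // assumption: acquisition request sent to the server, field names follow the ustk tutorials
  usNetworkGrabber::usInitHeaderSent header;
  header.probeId = 15;    // e.g. 4DC7 probe
  header.slotId = 0;      // top slot
  header.imagingMode = 0; // B-mode

  grabber.initAcquisition(header); // assumption: base-class call
  grabber.runAcquisition();        // assumption: base-class call

  // grab a few frames; acquire() blocks until a frame newer than the current output one arrives
  for (int i = 0; i < 10; i++) {
    usFrameGrabbedInfo<usImagePostScan2D<unsigned char> > *frame = grabber.acquire();
    std::cout << "frame " << frame->getFrameCount() << ", timestamp " << frame->getTimeStamp() << std::endl;
  }

  return 0;
}

Because acquire() spins a local QEventLoop until newFrameAvailable() is emitted, the grab loop above does not need to run inside app.exec().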