UsTK : Ultrasound ToolKit  version 2.0.1 under development (2024-11-21)
usNetworkGrabberRF2D.cpp
1 /****************************************************************************
2  *
3  * This file is part of the ustk software.
4  * Copyright (C) 2016 - 2017 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * ("GPL") version 2 as published by the Free Software Foundation.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ustk with software that can not be combined with the GNU
13  * GPL, please contact Inria about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * This software was developed at:
17  * Inria Rennes - Bretagne Atlantique
18  * Campus Universitaire de Beaulieu
19  * 35042 Rennes Cedex
20  * France
21  *
22  * If you have questions regarding the use of this file, please contact
23  * Inria at ustk@inria.fr
24  *
25  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
26  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
27  *
28  * Authors:
29  * Pedro Patlan
30  * Marc Pouliquen
31  *
32  *****************************************************************************/
33 
34 #include <visp3/ustk_grabber/usNetworkGrabberRF2D.h>
35 
36 #if defined(USTK_HAVE_QT5) || defined(USTK_HAVE_VTK_QT)
37 
38 #include <QtCore/QDataStream>
39 #include <QtCore/QEventLoop>
{
  // NOTE(review): the constructor signature line is elided from this listing --
  // per the class reference it is usNetworkGrabberRF2D(usNetworkGrabber *parent = 0).

  // Buffer of size 3: three frame slots, referenced elsewhere as
  // CURRENT_FILLED_FRAME_POSITION_IN_VEC (being written from the network),
  // MOST_RECENT_FRAME_POSITION_IN_VEC (last complete frame) and
  // OUTPUT_FRAME_POSITION_IN_VEC (handed to the user by acquire()).
  m_outputBuffer.push_back(new usFrameGrabbedInfo<usImageRF2D<short int> >);
  m_outputBuffer.push_back(new usFrameGrabbedInfo<usImageRF2D<short int> >);
  m_outputBuffer.push_back(new usFrameGrabbedInfo<usImageRF2D<short int> >);

  // No frame received yet: acquire() blocks until the first one arrives.
  m_firstFrameAvailable = false;

  // Disk recording is off until activateRecording() is called.
  m_recordingOn = false;
  m_firstImageTimestamp = 0;

  // Parse incoming TCP data as soon as bytes are available on the socket.
  connect(m_tcpSocket, SIGNAL(readyRead()), this, SLOT(dataArrived()));
}
57 
62 
// This function is called when the data is fully arrived from the server to the client.
// It handles three cases: (1) an init-confirmation header, (2) an image header followed
// by the first chunk of raw RF data, (3) the continuation of a frame split across
// several TCP packets (when m_bytesLeftToRead != 0).
// NOTE(review): the method signature line (usNetworkGrabberRF2D::dataArrived) and
// several body lines are elided from this listing; elisions are flagged below.
{
  // Bind a Qt data stream to the socket, with a serialization version matching
  // the Qt version ustk was built against.
  QDataStream in;
  in.setDevice(m_tcpSocket);
#if (defined(USTK_HAVE_QT5) || defined(USTK_HAVE_VTK_QT5))
  in.setVersion(QDataStream::Qt_5_0);
#elif defined(USTK_HAVE_VTK_QT4)
  in.setVersion(QDataStream::Qt_4_8);
#else
  throw(vpException(vpException::fatalError, "your Qt version is not managed in ustk"));
#endif

  int headerType;
  if (m_bytesLeftToRead == 0) { // do not try to read a header if last frame was not complete
    in >> headerType;
    if (m_verbose)
      std::cout << "header received, type = " << headerType << std::endl;
  } else {
    headerType = 0; // not a header received, but a part of a frame
  }

  // Case 1: init confirm header received.
  if (headerType == m_confirmHeader.headerId) {
    // read whole header
    in >> m_confirmHeader.initOk;
    // NOTE(review): one read is elided here (presumably m_confirmHeader.probeId,
    // which is printed below) -- confirm against the repository source.

    if (m_confirmHeader.initOk == 0) {
      m_tcpSocket->close();
      throw(vpException(vpException::fatalError, "porta initialisation error, closing connection."));
    }
    if (m_verbose)
      std::cout << "porta init sucess, detected probe id = " << m_confirmHeader.probeId << std::endl;

    // read all acquisition parameters received
    // NOTE(review): lines elided here (presumably readAcquisitionParameters(in) and a
    // serverUpdateEnded emit, both present in the class reference) -- confirm.
  }

  // Case 2: image header received, announcing a new frame.
  else if (headerType == m_imageHeader.headerId) {
    // read whole header
    // NOTE(review): the read of m_imageHeader.frameCount is elided from this listing.
    quint64 timestamp;
    in >> timestamp;
    m_imageHeader.timeStamp = timestamp;

    if (m_imageHeader.frameCount == 0) // used to save the sequence
      m_firstImageTimestamp = timestamp;

    in >> m_imageHeader.dataRate;
    // NOTE(review): the read of m_imageHeader.dataLength is elided from this listing.
    in >> m_imageHeader.ss;
    in >> m_imageHeader.imageType;
    // NOTE(review): reads of the remaining header fields (frameWidth, frameHeight,
    // pixelWidth, pixelHeight, frequencies, transducer and motor geometry) are elided
    // from this listing; they are all printed in the verbose dump below.

    in >> m_imageHeader.motorType;

    if (m_verbose) {
      std::cout << "frameCount = " << m_imageHeader.frameCount << std::endl;
      std::cout << "timeStamp = " << m_imageHeader.timeStamp << std::endl;
      std::cout << "dataRate = " << m_imageHeader.dataRate << std::endl;
      std::cout << "dataLength = " << m_imageHeader.dataLength << std::endl;
      std::cout << "ss = " << m_imageHeader.ss << std::endl;
      std::cout << "imageType = " << m_imageHeader.imageType << std::endl;
      std::cout << "frameWidth = " << m_imageHeader.frameWidth << std::endl;
      std::cout << "frameHeight = " << m_imageHeader.frameHeight << std::endl;
      std::cout << "pixelWidth = " << m_imageHeader.pixelWidth << std::endl;
      std::cout << "pixelHeight = " << m_imageHeader.pixelHeight << std::endl;
      std::cout << "transmitFrequency = " << m_imageHeader.transmitFrequency << std::endl;
      std::cout << "samplingFrequency = " << m_imageHeader.samplingFrequency << std::endl;
      std::cout << "transducerRadius = " << m_imageHeader.transducerRadius << std::endl;
      std::cout << "scanLinePitch = " << m_imageHeader.scanLinePitch << std::endl;
      std::cout << "scanLineNumber = " << m_imageHeader.scanLineNumber << std::endl;
      std::cout << "imageDepth = " << m_imageHeader.imageDepth << std::endl;
      std::cout << "anglePerFr = " << m_imageHeader.anglePerFr << std::endl;
      std::cout << "framesPerVolume = " << m_imageHeader.framesPerVolume << std::endl;
      std::cout << "motorRadius = " << m_imageHeader.motorRadius << std::endl;
      std::cout << "motorType = " << m_imageHeader.motorType << std::endl;
    }

    // update transducer settings with image header received
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTransducerRadius(m_imageHeader.transducerRadius);
    if (m_imageHeader.transducerRadius != 0.) {
      // non-zero radius => convex transducer
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setScanLinePitch(m_imageHeader.scanLinePitch);
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTransducerConvexity(true);
    } else { // linear transducer
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setScanLinePitch(0.);
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTransducerConvexity(false);
    }
    // depth scaled by 1/1000 -- presumably millimeters to meters; TODO confirm units
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setDepth(m_imageHeader.imageDepth / 1000.0);
    // axial resolution = depth per sample along a scan line
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)
        ->setAxialResolution((m_imageHeader.imageDepth / 1000.0) / m_imageHeader.frameHeight);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTransmitFrequency(m_imageHeader.transmitFrequency);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setSamplingFrequency(m_imageHeader.samplingFrequency);

    // set data info
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setFrameCount(m_imageHeader.frameCount);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setFramesPerVolume(m_imageHeader.framesPerVolume);
    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->setTimeStamp(m_imageHeader.timeStamp);

    // warning if timestamps are close (< 1 ms)
    if (m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getTimeStamp() -
            m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getTimeStamp() <
        1) {
      std::cout << "WARNING : new image received with an acquisition timestamp close to previous image" << std::endl;
    }

    m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)
    // NOTE(review): the call completing this statement (presumably a resize to the
    // received frame dimensions) and the initialisation of m_bytesLeftToRead are
    // elided from this listing.

    m_bytesLeftToRead -= in.readRawData((char *)m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->bitmap,
    // NOTE(review): the second argument of readRawData (the byte count) is elided.

    if (m_bytesLeftToRead == 0) { // we've read all the frame in 1 packet.
      // Now CURRENT_FILLED_FRAME_POSITION_IN_VEC has become the last frame received
      // So we switch pointers between MOST_RECENT_FRAME_POSITION_IN_VEC and CURRENT_FILLED_FRAME_POSITION_IN_VEC
      // NOTE(review): the declaration of savePtr is elided from this listing.
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC) = m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC);
      m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC) = savePtr;
      if (m_recordingOn)
        m_sequenceWriter.write(*m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC),
                               m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getTimeStamp() -
                                   m_firstImageTimestamp);

      m_firstFrameAvailable = true;
      emit(newFrameAvailable());
      emit(newFrame(*m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)));
    }
    if (m_verbose)
      std::cout << "Bytes left to read for whole frame = " << m_bytesLeftToRead << std::endl;

  }

  // Case 3: we have a part of the image still not read (arrived with next tcp packet).
  else {
    if (m_verbose) {
      std::cout << "reading following part of the frame" << std::endl;
      std::cout << "local image size = " << m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getNumberOfPixel()
                << std::endl;
    }
    // Append at the already-filled offset: total byte size (pixel count * 2, since the
    // image stores short int samples) minus what is still missing.
    m_bytesLeftToRead -= in.readRawData(
        ((char *)m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->bitmap) +
            ((m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC)->getNumberOfPixel() * 2) - m_bytesLeftToRead),
    // NOTE(review): the final argument of readRawData (the byte count) is elided.

    if (m_verbose)
      std::cout << "Bytes left to read for whole frame = " << m_bytesLeftToRead << std::endl;

    if (m_bytesLeftToRead == 0) { // we've read the last part of the frame.
      // Now CURRENT_FILLED_FRAME_POSITION_IN_VEC has become the last frame received
      // So we switch pointers between MOST_RECENT_FRAME_POSITION_IN_VEC and CURRENT_FILLED_FRAME_POSITION_IN_VEC
      // NOTE(review): the declaration of savePtr is elided from this listing.
      m_outputBuffer.at(CURRENT_FILLED_FRAME_POSITION_IN_VEC) = m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC);
      m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC) = savePtr;
      if (m_recordingOn)
        m_sequenceWriter.write(*m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC),
                               m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getTimeStamp() -
                                   m_firstImageTimestamp);

      m_firstFrameAvailable = true;
      emit(newFrameAvailable());
      emit(newFrame(*m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)));
    }
  }
}
249 
// Blocking acquisition: hands the most recent complete frame to the caller,
// waiting for a new one if none has arrived yet or if the caller acquires
// faster than frames come in.
// NOTE(review): the method signature line is elided from this listing -- per the
// class reference it returns usFrameGrabbedInfo<usImageRF2D<short int> > *.
{
  // manage first frame or if user grabs too fast
  if (!m_firstFrameAvailable ||
      m_outputBuffer.at(OUTPUT_FRAME_POSITION_IN_VEC)->getFrameCount() >=
          m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC)->getFrameCount()) {
    // we wait until a new frame is available:
    // local event loop quits when dataArrived() emits newFrameAvailable()
    QEventLoop loop;
    loop.connect(this, SIGNAL(newFrameAvailable()), SLOT(quit()));
    loop.exec();
  }

  // switch pointers so the freshest frame becomes the output slot
  // NOTE(review): the declaration of savePtr is elided from this listing.
  m_outputBuffer.at(OUTPUT_FRAME_POSITION_IN_VEC) = m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC);
  m_outputBuffer.at(MOST_RECENT_FRAME_POSITION_IN_VEC) = savePtr;

  return m_outputBuffer.at(OUTPUT_FRAME_POSITION_IN_VEC);
}
275 
// Enable writing of every grabbed frame to disk, in the given directory.
// NOTE(review): the method signature line is elided from this listing -- per the
// class reference it is void activateRecording(std::string path).
{
  m_recordingOn = true;
  m_sequenceWriter.setSequenceDirectory(path);
}
285 
289 void usNetworkGrabberRF2D::stopRecording() { m_recordingOn = false; }
290 
291 #endif
Class to store additional information arriving on the network with ultrasound images grabbed,...
void setSequenceDirectory(const std::string sequenceDirectory)
void write(const usImageRF2D< short int > &image, const uint64_t timestamp)
void activateRecording(std::string path)
void newFrame(usImageRF2D< short int > &)
usFrameGrabbedInfo< usImageRF2D< short int > > * acquire()
usNetworkGrabberRF2D(usNetworkGrabber *parent=0)
Generic abstract class to manage tcp connection to grab ultrasound frames (on port 8080).
usInitHeaderConfirmation m_confirmHeader
@ CURRENT_FILLED_FRAME_POSITION_IN_VEC
void readAcquisitionParameters(QDataStream &stream)
void serverUpdateEnded(bool success)
us::usImageHeader m_imageHeader
QTcpSocket * m_tcpSocket
int motorType
Definition: us.h:120
double anglePerFr
Definition: us.h:117
int dataLength
Definition: us.h:89
int frameWidth
Definition: us.h:94
double dataRate
Definition: us.h:87
unsigned int scanLineNumber
Definition: us.h:109
int imageDepth
Definition: us.h:110
uint32_t frameCount
Definition: us.h:84
int samplingFrequency
Definition: us.h:101
int framesPerVolume
Definition: us.h:118
double pixelHeight
Definition: us.h:98
double motorRadius
Definition: us.h:119
int transmitFrequency
Definition: us.h:100
int frameHeight
Definition: us.h:95
int imageType
Definition: us.h:92
double pixelWidth
Definition: us.h:97
int headerId
Definition: us.h:81
uint64_t timeStamp
Definition: us.h:85
double transducerRadius
Definition: us.h:107
double scanLinePitch
Definition: us.h:108