Autonomy Software C++ 24.5.1
Welcome to the Autonomy Software repository of the Mars Rover Design Team (MRDT) at Missouri University of Science and Technology (Missouri S&T)! This API reference contains the source code and other resources for the development of the autonomy software for our Mars rover. The Autonomy Software project aims to compete in the University Rover Challenge (URC) by demonstrating advanced autonomous capabilities and robust navigation algorithms.
TensorflowTPU.hpp
Go to the documentation of this file.
1
12#ifndef TENSORFLOW_TPU_HPP
13#define TENSORFLOW_TPU_HPP
14
15#include "../AutonomyLogging.h"
16
18#include <edgetpu.h>
19#include <tensorflow/lite/builtin_ops.h>
20#include <tensorflow/lite/interpreter.h>
21#include <tensorflow/lite/kernels/register.h>
22#include <tensorflow/lite/model.h>
23#include <vector>
24
26
27
37template<typename T, typename P>
38class TensorflowTPU
39{
40 public:
42 // Declare public enums that are specific to and used within this class.
44
45 // Enumerator for selecting which EdgeTPU device type the model should try to run on.
46 enum DeviceType
47 {
48 eAuto, // Any open device will be picked. Prioritizes PCIe device if not already in use.
49 ePCIe, // Attempt to use a PCIe device for this model.
50 eUSB // Attempt to use a USB device for this model.
51 };
52
53 // Enumerator for selecting performance mode of EdgeTPU devices.
54 enum PerformanceModes
55 {
56 eLow, // Power saver mode. Low power draw and little heat output, but not great performance.
57 eMedium, // Balanced. Medium power draw, medium performance.
58 eHigh, // Performance mode. High power draw and increased heat output, great performance.
59 eMax // Maximum clock speed. Max performance, but greatest power draw and heat output. Could damage device in hot environments.
60 };
61
63 // Declare public methods and member variables.
65
66
82 TensorflowTPU(std::string szModelPath, PerformanceModes ePowerMode = eHigh, unsigned int unMaxBulkInQueueLength = 32, bool bUSBAlwaysDFU = false)
83 {
84 // Initialize member variables.
85 m_szModelPath = szModelPath;
86 m_tpuDeviceOptions["Usb.MaxBulkInQueueLength"] = std::to_string(unMaxBulkInQueueLength);
87 m_bDeviceOpened = false;
88
89 // Determine which power mode should be set.
90 switch (ePowerMode)
91 {
92 case eLow: m_tpuDeviceOptions["Performance"] = "Low"; break;
93 case eMedium: m_tpuDeviceOptions["Performance"] = "Medium"; break;
94 case eHigh: m_tpuDeviceOptions["Performance"] = "High"; break;
95 case eMax: m_tpuDeviceOptions["Performance"] = "Max"; break;
96 default: m_tpuDeviceOptions["Performance"] = "High"; break;
97 }
98
99 // Determine if firmware should be loaded every time the code is started.
100 if (bUSBAlwaysDFU)
101 {
102 // Always load firmware.
103 m_tpuDeviceOptions["Usb.AlwaysDfu"] = "True";
104 }
105 else
106 {
107 // Only load firmware on first init of device.
108 m_tpuDeviceOptions["Usb.AlwaysDfu"] = "False";
109 }
110 }
111
112
119    ~TensorflowTPU()
120    {
121 // Check if device has been opened.
122 if (m_bDeviceOpened)
123 {
124 // Close tflite interpreter.
125 m_pInterpreter.reset();
126 // Close edgetpu hardware.
127 m_pEdgeTPUContext.reset();
128 // Close model.
129 m_pTFLiteModel.reset();
130 }
131 }
132
133
140    void CloseHardware()
141    {
142 // Set opened toggle.
143 m_bDeviceOpened = false;
144 // Close tflite interpreter.
145 m_pInterpreter.reset();
146 // Close edgetpu hardware.
147 m_pEdgeTPUContext.reset();
148 // Close model.
149 m_pTFLiteModel.reset();
150 }
151
152
162 TfLiteStatus OpenAndLoad(DeviceType eDeviceType = eAuto)
163 {
164 // Create instance variables.
165 TfLiteStatus tfReturnStatus = TfLiteStatus::kTfLiteCancelled;
166 std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord> vValidDevices;
167
168 // Determine which device is going to be used for this model.
169 switch (eDeviceType)
170 {
171 case eAuto: m_tpuDevice.type = edgetpu::DeviceType(-1); break;
172 case ePCIe: m_tpuDevice.type = edgetpu::DeviceType::kApexPci; break;
173 case eUSB: m_tpuDevice.type = edgetpu::DeviceType::kApexUsb; break;
174 default: m_tpuDevice.type = edgetpu::DeviceType(-1); break;
175 }
176
177 // Load compiled Edge TPU model as a flatbuffer model.
178 m_pTFLiteModel = tflite::FlatBufferModel::VerifyAndBuildFromFile(m_szModelPath.c_str());
179 // Check if model was successfully opened.
180 if (m_pTFLiteModel != nullptr)
181 {
182 // Get a list of available devices and already opened devices.
183 std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord> vDevices = this->GetHardwareDevices();
184 std::vector<std::shared_ptr<edgetpu::EdgeTpuContext>> vAlreadyOpenedDevices = this->GetOpenedHardwareDevices();
185
186 // Get list of valid, unopened devices.
187 // Loop through available devices.
188 for (unsigned int unIter = 0; unIter < vDevices.size(); ++unIter)
189 {
190 // Create instance variables.
191 bool bValidDevice = true;
192
193 // Loop through all opened devices.
194 for (unsigned int nJter = 0; nJter < vAlreadyOpenedDevices.size(); ++nJter)
195 {
196 // Check if current available device has already been opened.
197 if (vAlreadyOpenedDevices[nJter]->GetDeviceEnumRecord().path == vDevices[unIter].path)
198 {
199 // Set device as not valid.
200 bValidDevice = false;
201 }
202 // Determine if we should check device type.
203 else if (eDeviceType != eAuto)
204 {
205 // Check if device type matches.
206 if (vDevices[unIter].type != m_tpuDevice.type)
207 {
208 // Set device as not valid.
209 bValidDevice = false;
210 }
211 }
212 }
213
214 // Check if still valid.
215 if (bValidDevice)
216 {
217 // Append to valid devices vector.
218 vValidDevices.emplace_back(vDevices[unIter]);
219 }
220 }
221
222 // Check if any valid devices were found.
223 if (vValidDevices.size() > 0)
224 {
225 // Loop through each device until one successfully opens.
226 for (unsigned int unIter = 0; unIter < vValidDevices.size() && !m_bDeviceOpened; ++unIter)
227 {
228 // Submit logger message.
229 LOG_INFO(logging::g_qSharedLogger,
230 "Attempting to load {} onto {} device at {} ({})...",
231 m_szModelPath,
232 this->DeviceTypeToString(vValidDevices[unIter].type),
233 vValidDevices[unIter].path,
234 this->DeviceTypeToString(vValidDevices[unIter].type));
235
236 // Attempt to open device.
237 m_pEdgeTPUContext = this->GetEdgeManager()->OpenDevice(vValidDevices[unIter].type, vValidDevices[unIter].path, m_tpuDeviceOptions);
238
239 // Only proceed if device opened.
240 if (m_pEdgeTPUContext != nullptr && m_pEdgeTPUContext->IsReady())
241 {
242 // Create custom tflite operations for edge tpu.
243 tflite::ops::builtin::BuiltinOpResolverWithXNNPACK tfResolver;
244 tfResolver.AddCustom(edgetpu::kCustomOp, edgetpu::RegisterCustomOp());
245 // Create tflite interpreter with model and operations resolver.
246 if (tflite::InterpreterBuilder(*m_pTFLiteModel, tfResolver)(&m_pInterpreter) != kTfLiteOk)
247 {
248 // Submit logger message.
249 LOG_ERROR(logging::g_qSharedLogger,
250 "Unable to build interpreter for model {} with device {} ({})",
251 m_szModelPath,
252 vValidDevices[unIter].path,
253 this->DeviceTypeToString(vValidDevices[unIter].type));
254
255 // Release interpreter and context.
256 m_pInterpreter.reset();
257 m_pEdgeTPUContext.reset();
258
259 // Update return status.
260 tfReturnStatus = TfLiteStatus::kTfLiteUnresolvedOps;
261 }
262 else
263 {
264 // Bind the given context device with interpreter.
265 m_pInterpreter->SetExternalContext(kTfLiteEdgeTpuContext, m_pEdgeTPUContext.get());
266 // Attempt to allocate necessary tensors for model onto device.
267 if (m_pInterpreter->AllocateTensors() != kTfLiteOk)
268 {
269 // Submit logger message.
270 LOG_WARNING(logging::g_qSharedLogger,
271 "Even though device was opened and interpreter was built, allocation of tensors failed for model {} with device {} ({})",
272 m_szModelPath,
273 vValidDevices[unIter].path,
274 this->DeviceTypeToString(vValidDevices[unIter].type));
275
276 // Release interpreter and context.
277 m_pInterpreter.reset();
278 m_pEdgeTPUContext.reset();
279
280 // Update return status.
281 tfReturnStatus = TfLiteStatus::kTfLiteDelegateDataWriteError;
282 }
283 else
284 {
285 // Submit logger message.
286 LOG_INFO(logging::g_qSharedLogger,
287 "Successfully opened and loaded model {} with device {} ({})",
288 m_szModelPath,
289 vValidDevices[unIter].path,
290 this->DeviceTypeToString(vValidDevices[unIter].type));
291
292 // Set toggle that model is opened with device.
293 m_bDeviceOpened = true;
294
295 // Update return status.
296 tfReturnStatus = TfLiteStatus::kTfLiteOk;
297 }
298 }
299 }
300 else
301 {
302 // Submit logger message.
303 LOG_ERROR(logging::g_qSharedLogger,
304 "Unable to open device {} ({}) for model {}.",
305 vValidDevices[unIter].path,
306 this->DeviceTypeToString(vValidDevices[unIter].type),
307 m_szModelPath);
308 }
309 }
310 }
311 else
312 {
313 // Submit logger message.
314 LOG_ERROR(logging::g_qSharedLogger,
315 "No valid devices were found for model {}. Device type is {}",
316 m_szModelPath,
317 this->DeviceTypeToString(m_tpuDevice.type));
318 }
319 }
320 else
321 {
322 // Submit logger message.
323 LOG_ERROR(logging::g_qSharedLogger, "Unable to load model {}. Does it exist at this path? Is this actually compiled for the EdgeTPU?", m_szModelPath);
324 }
325
326 // Return status.
327 return tfReturnStatus;
328 }
329
331 // Setters
333
335 // Getters
337
338
347 bool GetDeviceIsOpened() const { return m_bDeviceOpened; }
348
349
358 static std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord> GetHardwareDevices()
359 {
360 // Create instance variables.
361 edgetpu::EdgeTpuManager* tpuEdgeManagerInstance = edgetpu::EdgeTpuManager::GetSingleton();
362
363 // Check if edgetpu singleton objects are supported.
364 if (tpuEdgeManagerInstance != nullptr)
365 {
366 // Get a list of devices from the edgetpu api.
367 return tpuEdgeManagerInstance->EnumerateEdgeTpu();
368 }
369 else
370 {
371 // Return empty vector.
372 return std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord>();
373 }
374 }
375
376
385 static std::vector<std::shared_ptr<edgetpu::EdgeTpuContext>> GetOpenedHardwareDevices()
386 {
387 // Create instance variables.
388 edgetpu::EdgeTpuManager* tpuEdgeManagerInstance = edgetpu::EdgeTpuManager::GetSingleton();
389
390 // Check if edgetpu singleton objects are supported.
391 if (tpuEdgeManagerInstance != nullptr)
392 {
393 // Get a list of devices from the edgetpu api.
394 return tpuEdgeManagerInstance->GetOpenedDevices();
395 }
396 else
397 {
398 // Return empty vector.
399 return std::vector<std::shared_ptr<edgetpu::EdgeTpuContext>>();
400 }
401 }
402
403 protected:
405 // Declare protected methods.
407
408
417 edgetpu::EdgeTpuManager* GetEdgeManager()
418 {
419 // Create instance variables.
420 edgetpu::EdgeTpuManager* tpuEdgeManagerInstance = edgetpu::EdgeTpuManager::GetSingleton();
421
422 // Check if edgetpu singleton objects are supported.
423 if (tpuEdgeManagerInstance == nullptr)
424 {
425 // Submit logger message.
426 LOG_CRITICAL(logging::g_qSharedLogger, "Unable to get EdgeTPU manager! This operating system does not support singletons.");
427 }
428
429 // Get a list of devices from the edgetpu api.
430 return tpuEdgeManagerInstance;
431 }
432
433
442 std::string DeviceTypeToString(edgetpu::DeviceType eDeviceType)
443 {
444 // Determine which device type string should be returned.
445 switch (eDeviceType)
446 {
447 case edgetpu::DeviceType::kApexUsb: return "USB";
448 case edgetpu::DeviceType::kApexPci: return "PCIe";
449 default: return "Not Found";
450 }
451 }
452
454 // Declare protected member variables.
456 std::string m_szModelPath;
457 edgetpu::EdgeTpuManager::DeviceEnumerationRecord m_tpuDevice;
458 edgetpu::EdgeTpuManager::DeviceOptions m_tpuDeviceOptions;
459 std::unique_ptr<tflite::FlatBufferModel> m_pTFLiteModel;
460 std::shared_ptr<edgetpu::EdgeTpuContext> m_pEdgeTPUContext;
461 std::unique_ptr<tflite::Interpreter> m_pInterpreter;
462 bool m_bDeviceOpened;
463
464 private:
466 // Declare private methods.
468
 469 // Declare interface class pure virtual functions. (These must be overridden by the inheritor.)
470 virtual T Inference(const P& tInput, const float fMinObjectConfidence, const float fNMSThreshold) = 0;
471
473 // Declare private member variables.
475};
476#endif
This class is designed to enable quick, easy, and robust handling of .tflite models for deployment and inference on the EdgeTPU.
Definition TensorflowTPU.hpp:39
edgetpu::EdgeTpuManager * GetEdgeManager()
Retrieves a pointer to an EdgeTpuManager instance from the libedgetpu library.
Definition TensorflowTPU.hpp:417
~TensorflowTPU()
Destroy the TensorflowTPU object.
Definition TensorflowTPU.hpp:119
static std::vector< edgetpu::EdgeTpuManager::DeviceEnumerationRecord > GetHardwareDevices()
Retrieve a list of EdgeTPU devices from the edge API.
Definition TensorflowTPU.hpp:358
void CloseHardware()
Release all hardware and reset models and interpreters.
Definition TensorflowTPU.hpp:140
TfLiteStatus OpenAndLoad(DeviceType eDeviceType=eAuto)
Attempt to open the model at the given path and load it onto the EdgeTPU device.
Definition TensorflowTPU.hpp:162
static std::vector< std::shared_ptr< edgetpu::EdgeTpuContext > > GetOpenedHardwareDevices()
Retrieve a list of already opened EdgeTPU devices from the edge API.
Definition TensorflowTPU.hpp:385
TensorflowTPU(std::string szModelPath, PerformanceModes ePowerMode=eHigh, unsigned int unMaxBulkInQueueLength=32, bool bUSBAlwaysDFU=false)
Construct a new TensorflowTPU object.
Definition TensorflowTPU.hpp:82
std::string DeviceTypeToString(edgetpu::DeviceType eDeviceType)
to_string method for converting a device type to a readable string.
Definition TensorflowTPU.hpp:442
bool GetDeviceIsOpened() const
Accessor for the Device Is Opened private member.
Definition TensorflowTPU.hpp:347
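
Because Inference() is declared as a pure virtual function, TensorflowTPU cannot be instantiated directly; it is used by deriving from it. The following is a minimal, hypothetical sketch (not part of the repository) of how a derived class might implement Inference() against the protected m_pInterpreter and how OpenAndLoad() and CloseHardware() are typically called. The class name HypotheticalClassifier, the model path, the template arguments, and the input/output tensor handling are illustrative assumptions only.

#include "TensorflowTPU.hpp"

#include <cstdint>
#include <cstring>
#include <vector>

// HypotheticalClassifier is an illustrative name only; the repository defines its own
// detector/classifier subclasses elsewhere. The return type (std::vector<float>) and
// input type (std::vector<uint8_t>) are likewise assumptions for this sketch.
class HypotheticalClassifier : public TensorflowTPU<std::vector<float>, std::vector<uint8_t>>
{
    public:
        // Inherit the base constructor (model path, power mode, USB queue length, DFU flag).
        using TensorflowTPU<std::vector<float>, std::vector<uint8_t>>::TensorflowTPU;

        // Override the pure virtual Inference() declared by the base class.
        std::vector<float> Inference(const std::vector<uint8_t>& tInput, const float fMinObjectConfidence, const float fNMSThreshold) override
        {
            // Create instance variables.
            std::vector<float> vResults;

            // Only run inference if OpenAndLoad() previously succeeded.
            if (this->GetDeviceIsOpened())
            {
                // Copy the quantized input into the interpreter's first input tensor.
                // (Assumes tInput is already sized to match the model's input tensor.)
                std::memcpy(m_pInterpreter->typed_input_tensor<uint8_t>(0), tInput.data(), tInput.size());

                // Invoke the model and read back the first output tensor.
                if (m_pInterpreter->Invoke() == kTfLiteOk)
                {
                    const float* pOutput   = m_pInterpreter->typed_output_tensor<float>(0);
                    const size_t siOutputs = m_pInterpreter->output_tensor(0)->bytes / sizeof(float);
                    vResults.assign(pOutput, pOutput + siOutputs);
                }
            }

            // A real detector would use these thresholds to filter results; this sketch ignores them.
            (void) fMinObjectConfidence;
            (void) fNMSThreshold;

            return vResults;
        }
};

int main()
{
    // "model_edgetpu.tflite" is a placeholder path to an EdgeTPU-compiled model.
    HypotheticalClassifier tpuModel("model_edgetpu.tflite", HypotheticalClassifier::eHigh);

    // Attempt to claim any unopened EdgeTPU (USB or PCIe) and load the model onto it.
    if (tpuModel.OpenAndLoad(HypotheticalClassifier::eAuto) == kTfLiteOk)
    {
        // Run inference on a dummy 300x300 RGB input (sized for an assumed SSD-style model).
        std::vector<uint8_t> vInputImage(300 * 300 * 3, 0);
        std::vector<float> vOutput = tpuModel.Inference(vInputImage, 0.5f, 0.4f);
    }

    // Hardware, interpreter, and model are also released by the destructor.
    tpuModel.CloseHardware();

    return 0;
}

As the source above shows, OpenAndLoad() reports its outcome through the returned TfLiteStatus: kTfLiteOk on success, kTfLiteUnresolvedOps if the interpreter could not be built, kTfLiteDelegateDataWriteError if tensor allocation failed, and kTfLiteCancelled if no model or valid device was available, so callers can distinguish the failure modes.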