82 TensorflowTPU(std::string szModelPath, PerformanceModes ePowerMode = eHigh,
unsigned int unMaxBulkInQueueLength = 32,
bool bUSBAlwaysDFU =
false)
85 m_szModelPath = szModelPath;
86 m_tpuDeviceOptions[
"Usb.MaxBulkInQueueLength"] = std::to_string(unMaxBulkInQueueLength);
87 m_bDeviceOpened =
false;
92 case eLow: m_tpuDeviceOptions[
"Performance"] =
"Low";
break;
93 case eMedium: m_tpuDeviceOptions[
"Performance"] =
"Medium";
break;
94 case eHigh: m_tpuDeviceOptions[
"Performance"] =
"High";
break;
95 case eMax: m_tpuDeviceOptions[
"Performance"] =
"Max";
break;
96 default: m_tpuDeviceOptions[
"Performance"] =
"High";
break;
103 m_tpuDeviceOptions[
"Usb.AlwaysDfu"] =
"True";
108 m_tpuDeviceOptions[
"Usb.AlwaysDfu"] =
"False";
// NOTE(review): this span is the body of the EdgeTPU open/load routine. Its
// signature, enclosing braces, and several interior lines are not visible in
// this extract, and the stray leading integers on many lines are line-number
// artifacts from that extraction. Code is preserved byte-for-byte; only
// comments are added.
// Start pessimistic: status stays "cancelled" unless a device is opened/loaded.
165 TfLiteStatus tfReturnStatus = TfLiteStatus::kTfLiteCancelled;
// Enumeration records for devices that survive the filtering below.
166 std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord> vValidDevices;
// Translate the requested device type (the switch header is not visible here).
// DeviceType(-1) is presumably an "any device" sentinel — TODO confirm against
// the edgetpu API; it is used for both eAuto and the default case.
171 case eAuto: m_tpuDevice.type = edgetpu::DeviceType(-1);
break;
172 case ePCIe: m_tpuDevice.type = edgetpu::DeviceType::kApexPci;
break;
173 case eUSB: m_tpuDevice.type = edgetpu::DeviceType::kApexUsb;
break;
174 default: m_tpuDevice.type = edgetpu::DeviceType(-1);
break;
// Load and verify the compiled flatbuffer model from disk.
178 m_pTFLiteModel = tflite::FlatBufferModel::VerifyAndBuildFromFile(m_szModelPath.c_str());
// Only proceed if the model file was readable and passed verification.
180 if (m_pTFLiteModel !=
nullptr)
// Enumerate every EdgeTPU device the manager can currently see.
183 std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord> vDevices = this->
GetHardwareDevices();
188 for (
unsigned int unIter = 0; unIter < vDevices.size(); ++unIter)
// Assume the candidate is usable until one of the filters below rejects it.
191 bool bValidDevice =
true;
// Reject devices whose path matches one already opened elsewhere.
// (vAlreadyOpenedDevices is declared on a line not visible in this extract —
// presumably a class-wide registry of opened wrappers; verify against source.)
194 for (
unsigned int nJter = 0; nJter < vAlreadyOpenedDevices.size(); ++nJter)
197 if (vAlreadyOpenedDevices[nJter]->GetDeviceEnumRecord().path == vDevices[unIter].path)
200 bValidDevice =
false;
// When a specific bus type was requested, reject devices of the wrong type.
203 else if (eDeviceType != eAuto)
206 if (vDevices[unIter].type != m_tpuDevice.type)
209 bValidDevice =
false;
218 vValidDevices.emplace_back(vDevices[unIter]);
// Try each valid candidate in turn; the loop condition stops iterating as
// soon as one device has been successfully opened (m_bDeviceOpened).
223 if (vValidDevices.size() > 0)
226 for (
unsigned int unIter = 0; unIter < vValidDevices.size() && !m_bDeviceOpened; ++unIter)
229 LOG_INFO(logging::g_qSharedLogger,
230 "Attempting to load {} onto {} device at {} ({})...",
233 vValidDevices[unIter].path,
// Open the EdgeTPU runtime context using the options built in the constructor.
237 m_pEdgeTPUContext = this->
GetEdgeManager()->OpenDevice(vValidDevices[unIter].type, vValidDevices[unIter].path, m_tpuDeviceOptions);
240 if (m_pEdgeTPUContext !=
nullptr && m_pEdgeTPUContext->IsReady())
// Op resolver with the edgetpu custom op registered so TPU-delegated
// operations in the model can be resolved by the interpreter builder.
243 tflite::ops::builtin::BuiltinOpResolverWithXNNPACK tfResolver;
244 tfResolver.AddCustom(edgetpu::kCustomOp, edgetpu::RegisterCustomOp());
246 if (tflite::InterpreterBuilder(*m_pTFLiteModel, tfResolver)(&m_pInterpreter) != kTfLiteOk)
249 LOG_ERROR(logging::g_qSharedLogger,
250 "Unable to build interpreter for model {} with device {} ({})",
252 vValidDevices[unIter].path,
// Roll back both interpreter and context so the next candidate starts clean.
256 m_pInterpreter.reset();
257 m_pEdgeTPUContext.reset();
260 tfReturnStatus = TfLiteStatus::kTfLiteUnresolvedOps;
// Interpreter built: bind it to the opened TPU context, then allocate tensors.
265 m_pInterpreter->SetExternalContext(kTfLiteEdgeTpuContext, m_pEdgeTPUContext.get());
267 if (m_pInterpreter->AllocateTensors() != kTfLiteOk)
270 LOG_WARNING(logging::g_qSharedLogger,
271 "Even though device was opened and interpreter was built, allocation of tensors failed for model {} with device {} ({})",
273 vValidDevices[unIter].path,
// Tensor allocation failed: release both handles and record the failure.
277 m_pInterpreter.reset();
278 m_pEdgeTPUContext.reset();
281 tfReturnStatus = TfLiteStatus::kTfLiteDelegateDataWriteError;
// Full success path: log it and mark the device opened (ends the retry loop).
286 LOG_INFO(logging::g_qSharedLogger,
287 "Successfully opened and loaded model {} with device {} ({})",
289 vValidDevices[unIter].path,
293 m_bDeviceOpened =
true;
296 tfReturnStatus = TfLiteStatus::kTfLiteOk;
// OpenDevice returned null or a not-ready context for this candidate.
303 LOG_ERROR(logging::g_qSharedLogger,
304 "Unable to open device {} ({}) for model {}.",
305 vValidDevices[unIter].path,
// No candidates survived filtering (all in use, or wrong bus type).
314 LOG_ERROR(logging::g_qSharedLogger,
315 "No valid devices were found for model {}. Device type is {}",
// Model load/verify failed: file missing or not an EdgeTPU-compiled model.
323 LOG_ERROR(logging::g_qSharedLogger,
"Unable to load model {}. Does it exist at this path? Is this actually compiled for the EdgeTPU?", m_szModelPath);
// Propagate the final status to the caller.
327 return tfReturnStatus;