// NOTE(review): this view is elided — the constructor's name/signature and several
// structural lines (braces, the switch header for ePowerMode, the if/else guarding
// the Usb.AlwaysDfu option) fall outside the visible lines. Comments only; code untouched.
// Trailing defaulted parameters: TPU performance mode, USB bulk-in queue depth,
// and whether the USB device should always re-enter DFU mode.
83 PerformanceModes ePowerMode = PerformanceModes::eHigh,
84 unsigned int unMaxBulkInQueueLength = 32,
85 bool bUSBAlwaysDFU =
false)
// Remember the model path for later loading in OpenAndLoad.
88 m_szModelPath = szModelPath;
// Forward the USB bulk-in queue length to the EdgeTPU device options map
// (options are string key/value pairs, hence the to_string conversion).
89 m_tpuDeviceOptions[
"Usb.MaxBulkInQueueLength"] = std::to_string(unMaxBulkInQueueLength);
// No device has been opened yet.
90 m_bDeviceOpened =
false;
// Map the requested performance mode enum onto the string value the EdgeTPU
// runtime expects for its "Performance" option. Unknown values fall back to "High".
95 case PerformanceModes::eLow: m_tpuDeviceOptions[
"Performance"] =
"Low";
break;
96 case PerformanceModes::eMedium: m_tpuDeviceOptions[
"Performance"] =
"Medium";
break;
97 case PerformanceModes::eHigh: m_tpuDeviceOptions[
"Performance"] =
"High";
break;
98 case PerformanceModes::eMax: m_tpuDeviceOptions[
"Performance"] =
"Max";
break;
99 default: m_tpuDeviceOptions[
"Performance"] =
"High";
break;
// Translate the bUSBAlwaysDFU flag into the runtime's string-valued option.
// (The surrounding if/else is on elided lines 100-110 — presumably keyed on
// bUSBAlwaysDFU; confirm against the full file.)
106 m_tpuDeviceOptions[
"Usb.AlwaysDfu"] =
"True";
111 m_tpuDeviceOptions[
"Usb.AlwaysDfu"] =
"False";
// NOTE(review): elided view — braces, switch/else headers, and several LOG format
// arguments fall on lines not shown here. Comments only; code untouched.
//
// @brief Opens an available EdgeTPU device of the requested type and loads the
//        model stored in m_szModelPath onto it, building a TFLite interpreter
//        bound to that device.
// @param eDeviceType Which bus to target (eAuto accepts any device type).
// @return kTfLiteOk on success; kTfLiteUnresolvedOps if the interpreter could not
//         be built; kTfLiteDelegateDataWriteError if tensor allocation failed;
//         kTfLiteCancelled if no attempt succeeded (initial value below).
165 TfLiteStatus
OpenAndLoad(DeviceType eDeviceType = DeviceType::eAuto)
// Pessimistic default: only overwritten once an open/build/allocate step runs.
168 TfLiteStatus tfReturnStatus = TfLiteStatus::kTfLiteCancelled;
// Devices that survive the "not already opened / matches requested type" filter.
169 std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord> vValidDevices;
// Map our DeviceType enum onto the edgetpu runtime's device type.
// DeviceType(-1) is used as an out-of-range sentinel for "any/auto" —
// NOTE(review): relies on the enum accepting this value; confirm against the
// edgetpu library's handling.
174 case DeviceType::eAuto: m_tpuDevice.type = edgetpu::DeviceType(-1);
break;
175 case DeviceType::ePCIe: m_tpuDevice.type = edgetpu::DeviceType::kApexPci;
break;
176 case DeviceType::eUSB: m_tpuDevice.type = edgetpu::DeviceType::kApexUsb;
break;
177 default: m_tpuDevice.type = edgetpu::DeviceType(-1);
break;
// Load and verify the flatbuffer model from disk; returns nullptr on failure
// (missing file or failed verification).
181 m_pTFLiteModel = tflite::FlatBufferModel::VerifyAndBuildFromFile(m_szModelPath.c_str());
183 if (m_pTFLiteModel !=
nullptr)
// Enumerate all EdgeTPU devices currently visible to the runtime.
186 std::vector<edgetpu::EdgeTpuManager::DeviceEnumerationRecord> vDevices = this->
GetHardwareDevices();
// Filter the enumeration: drop devices some other instance already opened,
// and (when a specific type was requested) devices of the wrong type.
191 for (
unsigned int unIter = 0; unIter < vDevices.size(); ++unIter)
// Assume usable until proven otherwise.
194 bool bValidDevice =
true;
// vAlreadyOpenedDevices is declared on an elided line — presumably a
// class/static registry of devices opened elsewhere; confirm in full file.
197 for (
unsigned int nJter = 0; nJter < vAlreadyOpenedDevices.size(); ++nJter)
// Same device path ⇒ already in use by another instance.
200 if (vAlreadyOpenedDevices[nJter]->GetDeviceEnumRecord().path == vDevices[unIter].path)
203 bValidDevice =
false;
// Only enforce the type match when the caller asked for a specific bus.
206 else if (eDeviceType != DeviceType::eAuto)
209 if (vDevices[unIter].type != m_tpuDevice.type)
212 bValidDevice =
false;
// Device passed all checks — keep it as an open candidate.
221 vValidDevices.emplace_back(vDevices[unIter]);
226 if (vValidDevices.size() > 0)
// Try candidates in order until one is successfully opened (m_bDeviceOpened
// terminates the loop early on success).
229 for (
unsigned int unIter = 0; unIter < vValidDevices.size() && !m_bDeviceOpened; ++unIter)
232 LOG_INFO(logging::g_qSharedLogger,
233 "Attempting to load {} onto {} device at {} ({})...",
236 vValidDevices[unIter].path,
// Ask the EdgeTPU manager to open this device with the option map built
// in the constructor (performance mode, USB queue length, DFU flag).
240 m_pEdgeTPUContext = this->
GetEdgeManager()->OpenDevice(vValidDevices[unIter].type, vValidDevices[unIter].path, m_tpuDeviceOptions);
243 if (m_pEdgeTPUContext !=
nullptr && m_pEdgeTPUContext->IsReady())
// Resolver with XNNPACK plus the EdgeTPU custom op, required so the
// interpreter can execute the edgetpu-compiled subgraph.
246 tflite::ops::builtin::BuiltinOpResolverWithXNNPACK tfResolver;
247 tfResolver.AddCustom(edgetpu::kCustomOp, edgetpu::RegisterCustomOp());
// Build the interpreter for this model/resolver pair.
249 if (tflite::InterpreterBuilder(*m_pTFLiteModel, tfResolver)(&m_pInterpreter) != kTfLiteOk)
252 LOG_ERROR(logging::g_qSharedLogger,
253 "Unable to build interpreter for model {} with device {} ({})",
255 vValidDevices[unIter].path,
// Roll back: release the half-built interpreter and the device context
// so the next candidate (or caller) starts clean.
259 m_pInterpreter.reset();
260 m_pEdgeTPUContext.reset();
263 tfReturnStatus = TfLiteStatus::kTfLiteUnresolvedOps;
// Bind the opened EdgeTPU context to the interpreter before allocation.
268 m_pInterpreter->SetExternalContext(kTfLiteEdgeTpuContext, m_pEdgeTPUContext.get());
270 if (m_pInterpreter->AllocateTensors() != kTfLiteOk)
273 LOG_WARNING(logging::g_qSharedLogger,
274 "Even though device was opened and interpreter was built, allocation of tensors failed for model {} with device {} ({})",
276 vValidDevices[unIter].path,
// Roll back interpreter and context on allocation failure as well.
280 m_pInterpreter.reset();
281 m_pEdgeTPUContext.reset();
284 tfReturnStatus = TfLiteStatus::kTfLiteDelegateDataWriteError;
289 LOG_INFO(logging::g_qSharedLogger,
290 "Successfully opened and loaded model {} with device {} ({})",
292 vValidDevices[unIter].path,
// Success: mark opened (also ends the candidate loop) and report OK.
296 m_bDeviceOpened =
true;
299 tfReturnStatus = TfLiteStatus::kTfLiteOk;
// OpenDevice returned null or the context was not ready for this candidate.
306 LOG_ERROR(logging::g_qSharedLogger,
307 "Unable to open device {} ({}) for model {}.",
308 vValidDevices[unIter].path,
// No candidate survived filtering — nothing to open.
317 LOG_ERROR(logging::g_qSharedLogger,
318 "No valid devices were found for model {}. Device type is {}",
// Model failed to load/verify from disk.
326 LOG_ERROR(logging::g_qSharedLogger,
"Unable to load model {}. Does it exist at this path? Is this actually compiled for the EdgeTPU?", m_szModelPath);
// Whatever status the last attempt (or the initial kTfLiteCancelled) left us with.
330 return tfReturnStatus;