From 42706306342da47fdcfaeb5885e11c731948b65d Mon Sep 17 00:00:00 2001
From: jafet-chaves
Date: Mon, 18 May 2020 10:43:44 -0600
Subject: [PATCH 01/29] Update version number

---
 configure.ac | 2 +-
 meson.build  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/configure.ac b/configure.ac
index fdd9e5dd..d427ed31 100644
--- a/configure.ac
+++ b/configure.ac
@@ -10,7 +10,7 @@
 # Initialize autoconf.
 AC_PREREQ([2.69])
-AC_INIT([RidgeRun inference library],[0.6.0],[https://github.com/RidgeRun/r2inference/issues],[r2inference])
+AC_INIT([RidgeRun inference library],[0.7.0],[https://github.com/RidgeRun/r2inference/issues],[r2inference])
 
 # Initialize our build utils
 RR_INIT

diff --git a/meson.build b/meson.build
index 524870cb..55f57264 100644
--- a/meson.build
+++ b/meson.build
@@ -1,5 +1,5 @@
 project('r2inference', ['cpp'],
   default_options : ['cpp_std=c++11'],
-  version : '0.6.0',
+  version : '0.7.0',
   meson_version : '>= 0.50',)
 
 # Set project information

From c0e18c590b6fe2cde658842e13bf70209982cd84 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Sat, 16 May 2020 09:23:51 -0600
Subject: [PATCH 02/29] Add EdgeTPU enable flag

---
 configure.ac | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/configure.ac b/configure.ac
index d427ed31..14023115 100644
--- a/configure.ac
+++ b/configure.ac
@@ -50,12 +50,17 @@ RR_CHECK_FEATURE_LIB(TENSORFLOW, TensorFlow Installation,
   tensorflow, TF_Version, tensorflow/c/c_api.h, no)
 
 AC_LANG_PUSH([C++])
+RR_CHECK_FEATURE_LIB(EDGETPU, EdgeTPU with TensorFlow lite Installation,
+  :libedgetpu.so.1.0, edgetpu_version, edgetpu.h, no)
+
 RR_CHECK_FEATURE_LIB(TFLITE, TensorFlow lite Installation,
   tensorflow-lite, TfLiteTypeGetName, tensorflow/lite/model.h, no)
 
 # add specific LIBS for TFLITE
 AC_SUBST([TFLITE_LIBS], ["$TFLITE_LIBS -pthread -ldl -lrt"])
 
+AM_CONDITIONAL([HAVE_EDGETPU], [AM_COND_IF([HAVE_TFLITE], [], [AC_MSG_ERROR(The EdgeTPU backend needs TFLITE enable)])])
+
 RR_CHECK_FEATURE_LIB(TENSORRT, TensorRT Installation, nvinfer,
   createInferBuilder_INTERNAL, NvInfer.h, no)
 AC_LANG_POP([C++])
From d13a8ff9dee29bf4dbc62f97e24c62bc757bf1ed Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Sat, 16 May 2020 13:09:34 -0600
Subject: [PATCH 03/29] Add virtual methods to tflite engine

---
 r2i/tflite/engine.cc | 49 ++++++++++++++++++++++++++++++++------------
 r2i/tflite/engine.h  |  9 +++++++-
 2 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index 9bc726ec..cafb75e3 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -14,7 +14,6 @@
 #include "r2i/tflite/prediction.h"
 #include "r2i/tflite/frame.h"
 #include
-#include
 #include
 
 namespace r2i {
@@ -75,6 +74,8 @@ RuntimeError Engine::Start () {
   if (!this->interpreter) {
     ::tflite::ops::builtin::BuiltinOpResolver resolver;
 
+    this->setupResolver(resolver);
+
     ::tflite::ErrorReporter *error_reporter = ::tflite::DefaultErrorReporter();
     std::unique_ptr<::tflite::Interpreter> interpreter;
@@ -88,6 +89,8 @@ RuntimeError Engine::Start () {
       return error;
     }
 
+    this->setInterpreterContext();
+
     std::shared_ptr<::tflite::Interpreter> tflite_interpreter_shared{std::move(interpreter)};
 
     this->interpreter = tflite_interpreter_shared;
@@ -200,6 +203,36 @@ std::shared_ptr Engine::Predict (std::shared_ptr
     return nullptr;
   }
 
+  auto tensor_data = this->runInference(frame, input, wanted_width, wanted_height,
+                                        wanted_channels, error);
+  if (r2i::RuntimeError::EOK != error.GetCode()) {
+    return nullptr;
+  }
+
+  int output = this->interpreter->outputs()[0];
+  TfLiteIntArray *output_dims = this->interpreter->tensor(output)->dims;
+  auto output_size = GetRequiredBufferSize(output_dims) * sizeof(float);
+  prediction->SetTensorValues(tensor_data, output_size);
+
+  return prediction;
+}
+
+Engine::~Engine () {
+  this->Stop();
+}
+
+void Engine::setupResolver(::tflite::ops::builtin::BuiltinOpResolver
+                           &/*resolver*/) {
+  // No implementation for tflite engine
+}
+
+void Engine::setInterpreterContext() {
+  // No implementation for tflite engine
+}
+
+float *Engine::runInference(std::shared_ptr frame,
+                            const int &input, const int &width, const int &height, const int &channels,
+                            r2i::RuntimeError &error) {
   auto input_tensor = this->interpreter->typed_tensor(input);
   auto input_data = (float *)frame->GetData();
@@ -209,7 +242,7 @@ std::shared_ptr Engine::Predict (std::shared_ptr
   }
 
   memcpy(input_tensor, input_data,
-         wanted_height * wanted_width * wanted_channels * sizeof(float));
+         height * width * channels * sizeof(float));
 
   if (this->interpreter->Invoke() != kTfLiteOk) {
     error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
@@ -217,17 +250,7 @@ std::shared_ptr Engine::Predict (std::shared_ptr
     return nullptr;
   }
 
-  int output = this->interpreter->outputs()[0];
-  TfLiteIntArray *output_dims = this->interpreter->tensor(output)->dims;
-  auto output_size = GetRequiredBufferSize(output_dims) * sizeof(float);
-  auto *tensor_data = this->interpreter->typed_output_tensor(0);
-  prediction->SetTensorValues(tensor_data, output_size);
-
-  return prediction;
-}
-
-Engine::~Engine () {
-  this->Stop();
+  return this->interpreter->typed_output_tensor(0);
 }
 
 } //namespace tflite

diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index 010e5204..9a99ae76 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -17,6 +17,7 @@
 
 #include
 #include
+#include
 
 namespace r2i {
 namespace tflite {
@@ -41,7 +42,7 @@ class Engine : public IEngine {
 
   ~Engine ();
 
- private:
+ protected:
   enum State {
     STARTED,
     STOPPED
@@ -52,6 +53,12 @@ class Engine : public IEngine {
   std::shared_ptr model;
   int number_of_threads;
   int allow_fp16;
+
+  virtual void setupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver);
+  virtual void setInterpreterContext();
+  virtual float *runInference(std::shared_ptr frame,
+                              const int &input, const int &width, const int &height, const int &channels,
+                              r2i::RuntimeError &error);
 };
 
 }
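The refactor above is the hook the rest of this series hangs off of: the generic TensorFlow Lite flow stays in r2i::tflite::Engine, and a derived engine overrides only the hardware-specific steps. A minimal, self-contained sketch of the same template-method pattern (the class and method names here are illustrative, not the r2inference API):

    #include <iostream>
    #include <memory>

    // Base engine owns the control flow; the hooks default to the plain CPU path.
    class BaseEngine {
     public:
      void Start () {
        setupResolver ();          // hook: register extra kernels
        setInterpreterContext ();  // hook: attach an accelerator context
        std::cout << "interpreter ready" << std::endl;
      }
      virtual ~BaseEngine () {}
     protected:
      virtual void setupResolver () { /* builtin ops are enough */ }
      virtual void setInterpreterContext () { /* nothing to attach */ }
    };

    // An accelerated engine overrides only the two hardware hooks.
    class TpuEngine : public BaseEngine {
     protected:
      void setupResolver () override { std::cout << "register custom op" << std::endl; }
      void setInterpreterContext () override { std::cout << "open TPU device" << std::endl; }
    };

    int main () {
      std::unique_ptr<BaseEngine> engine (new TpuEngine);
      engine->Start ();  // prints the TPU-specific steps, then "interpreter ready"
      return 0;
    }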
From be44cae48c135bed73015303ee11ea6a8bcc72ea Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Sat, 16 May 2020 13:12:36 -0600
Subject: [PATCH 04/29] Add EdgeTPU engine

---
 configure.ac            |  3 +-
 r2i/Makefile.am         | 12 ++++-
 r2i/edgetpu/Makefile.am | 46 +++++++++++++++++++
 r2i/edgetpu/engine.cc   | 98 +++++++++++++++++++++++++++++++++++++++++
 r2i/edgetpu/engine.h    | 37 ++++++++++++++++
 5 files changed, 194 insertions(+), 2 deletions(-)
 create mode 100644 r2i/edgetpu/Makefile.am
 create mode 100644 r2i/edgetpu/engine.cc
 create mode 100644 r2i/edgetpu/engine.h

diff --git a/configure.ac b/configure.ac
index 14023115..0ea90d4b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -59,7 +59,7 @@ RR_CHECK_FEATURE_LIB(TFLITE, TensorFlow lite Installation,
 # add specific LIBS for TFLITE
 AC_SUBST([TFLITE_LIBS], ["$TFLITE_LIBS -pthread -ldl -lrt"])
 
-AM_CONDITIONAL([HAVE_EDGETPU], [AM_COND_IF([HAVE_TFLITE], [], [AC_MSG_ERROR(The EdgeTPU backend needs TFLITE enable)])])
+AM_COND_IF([HAVE_EDGETPU], [AM_COND_IF([HAVE_TFLITE], [], [AC_MSG_ERROR(The EdgeTPU backend needs TFLITE enable)])], [])
 
 RR_CHECK_FEATURE_LIB(TENSORRT, TensorRT Installation, nvinfer,
   createInferBuilder_INTERNAL, NvInfer.h, no)
@@ -88,6 +88,7 @@ r2i/ncsdk/Makefile
 r2i/tensorflow/Makefile
 r2i/tensorrt/Makefile
 r2i/tflite/Makefile
+r2i/edgetpu/Makefile
 r2inference-${RR_PACKAGE_VERSION}.pc:r2inference.pc.in
 tests/Makefile
 tests/acceptance/Makefile

diff --git a/r2i/Makefile.am b/r2i/Makefile.am
index e8470e24..5cdf58d9 100644
--- a/r2i/Makefile.am
+++ b/r2i/Makefile.am
@@ -26,11 +26,16 @@ if HAVE_TENSORRT
 SUBDIRS += tensorrt
 endif
 
+if HAVE_EDGETPU
+SUBDIRS += edgetpu
+endif
+
 DIST_SUBDIRS = \
   ncsdk \
   tensorflow \
   tensorrt \
-  tflite
+  tflite \
+  edgetpu
 
 lib_LTLIBRARIES = libr2inference-@RR_PACKAGE_VERSION@.la
 
@@ -95,3 +100,8 @@ if HAVE_TFLITE
 libr2inference_@RR_PACKAGE_VERSION@_la_LIBADD += \
   $(top_builddir)/r2i/tflite/libtflite.la
 endif
+
+if HAVE_EDGETPU
+libr2inference_@RR_PACKAGE_VERSION@_la_LIBADD += \
+  $(top_builddir)/r2i/edgetpu/libedgetpu.la
+endif

diff --git a/r2i/edgetpu/Makefile.am b/r2i/edgetpu/Makefile.am
new file mode 100644
index 00000000..497cf0a1
--- /dev/null
+++ b/r2i/edgetpu/Makefile.am
@@ -0,0 +1,46 @@
+# Copyright (C) 2018 RidgeRun, LLC (http://www.ridgerun.com)
+# All Rights Reserved.
+#
+# The contents of this software are proprietary and confidential to RidgeRun,
+# LLC. No part of this program may be photocopied, reproduced or translated
+# into another programming language without prior written consent of
+# RidgeRun, LLC. The user is free to modify the source code after obtaining
+# a software license from RidgeRun. All source code changes must be provided
+# back to RidgeRun without any encumbrance.
+
+if HAVE_EDGETPU
+
+noinst_LTLIBRARIES = libedgetpu.la
+
+# Define a custom area for our headers
+tensorflowliteedgetpuincludedir = @R2IINCLUDEDIR@/r2i/edgetpu
+
+libedgetpu_la_SOURCES = \
+  engine.cc
+
+libedgetpu_la_CPPFLAGS = \
+  $(RR_CPPFLAGS) \
+  $(CODE_COVERAGE_CPPFLAGS)
+
+libedgetpu_la_CFLAGS = \
+  $(RR_CFLAGS) \
+  $(TFLITE_CFLAGS) \
+  $(EDGETPU_CFLAGS)  \
+  $(CODE_COVERAGE_CFLAGS)
+
+libedgetpu_la_CXXFLAGS = \
+  $(RR_CXXFLAGS) \
+  $(TFLITE_CFLAGS) \
+  $(EDGETPU_CFLAGS)  \
+  $(CODE_COVERAGE_CXXFLAGS)
+
+libedgetpu_la_LIBADD = \
+  $(RR_LIBS) \
+  $(TFLITE_LIBS) \
+  $(EDGETPU_LIBS)  \
+  $(CODE_COVERAGE_LIBS)
+
+tensorflowliteedgetpuinclude_HEADERS = \
+  engine.h
+
+endif # HAVE_EDGETPU

diff --git a/r2i/edgetpu/engine.cc b/r2i/edgetpu/engine.cc
new file mode 100644
index 00000000..1a2050b0
--- /dev/null
+++ b/r2i/edgetpu/engine.cc
@@ -0,0 +1,98 @@
+/* Copyright (C) 2018-2020 RidgeRun, LLC (http://www.ridgerun.com)
+ * All Rights Reserved.
+ *
+ * The contents of this software are proprietary and confidential to RidgeRun,
+ * LLC. No part of this program may be photocopied, reproduced or translated
+ * into another programming language without prior written consent of
+ * RidgeRun, LLC. The user is free to modify the source code after obtaining
+ * a software license from RidgeRun. All source code changes must be provided
+ * back to RidgeRun without any encumbrance.
+ */
+
+#include "r2i/edgetpu/engine.h"
+
+#include
+
+namespace r2i {
+namespace edgetpu {
+
+Engine::Engine () {
+  this->state = r2i::tflite::Engine::State::STOPPED;
+  this->model = nullptr;
+  this->number_of_threads = 1;
+  this->allow_fp16 = 0;
+}
+
+Engine::~Engine () {
+  r2i::tflite::Engine::Stop();
+}
+
+void Engine::setupResolver(::tflite::ops::builtin::BuiltinOpResolver
+                           &resolver) {
+  resolver.AddCustom(::edgetpu::kCustomOp, ::edgetpu::RegisterCustomOp());
+}
+
+void Engine::setInterpreterContext() {
+  std::shared_ptr<::edgetpu::EdgeTpuContext> edgetpu_context =
+    ::edgetpu::EdgeTpuManager::GetSingleton()->OpenDevice();
+  this->interpreter->SetExternalContext(kTfLiteEdgeTpuContext,
+                                        edgetpu_context.get());
+}
+
+float *Engine::runInference(std::shared_ptr frame,
+                            const int &input, const int &width, const int &height, const int &channels,
+                            r2i::RuntimeError &error) {
+  auto input_tensor = this->interpreter->typed_tensor(input);
+  auto input_data = (float *)frame->GetData();
+
+  if (!input_data) {
+    error.Set (RuntimeError::Code::FRAMEWORK_ERROR, "Failed to get image data");
+    return nullptr;
+  }
+
+  // Convert to fixed point
+  std::unique_ptr input_data_fixed(new uint8_t(height * width *
+                                   channels));
+  int index;
+  for (index = 0; index < height * width * channels; index++) {
+    input_data_fixed.get()[index] = static_cast(input_data[index]);
+  }
+
+  memcpy(input_tensor, input_data_fixed.get(),
+         height * width * channels * sizeof(uint8_t));
+
+  if (this->interpreter->Invoke() != kTfLiteOk) {
+    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
+               "Failed to invoke tflite!");
+    return nullptr;
+  }
+
+  const auto &output_indices = interpreter->outputs();
+  const auto *out_tensor = interpreter->tensor(output_indices[0]);
+  std::unique_ptr output_data;
+
+  if (out_tensor->type == kTfLiteUInt8) {
+    uint8_t *output_data_fixed = interpreter->typed_output_tensor(0);
+    TfLiteIntArray *output_dims = this->interpreter->tensor(
+                                    output_indices[0])->dims;
+
+    // Convert to fixed point
+    auto output_size = GetRequiredBufferSize(output_dims);
+    output_data = std::unique_ptr(new float(output_size));
+    for (index = 0; index < output_size; index++) {
+      output_data.get()[index] = static_cast(output_data_fixed[index]);
+    }
+  } else if (out_tensor->type == kTfLiteFloat32) {
+    output_data = std::unique_ptr(interpreter->typed_output_tensor
+                                  (0));
+  } else {
+    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
+               "Output tensor has unsupported output type");
+    return nullptr;
+  }
+
+  return output_data.get();
+}
+
+} //namepsace edgetpu
+} //namepsace r2i
\ No newline at end of file

diff --git a/r2i/edgetpu/engine.h b/r2i/edgetpu/engine.h
new file mode 100644
index 00000000..0e2baa57
--- /dev/null
+++ b/r2i/edgetpu/engine.h
@@ -0,0 +1,37 @@
+/* Copyright (C) 2018 RidgeRun, LLC (http://www.ridgerun.com)
+ * All Rights Reserved.
+ *
+ * The contents of this software are proprietary and confidential to RidgeRun,
+ * LLC. No part of this program may be photocopied, reproduced or translated
+ * into another programming language without prior written consent of
+ * RidgeRun, LLC. The user is free to modify the source code after obtaining
+ * a software license from RidgeRun. All source code changes must be provided
+ * back to RidgeRun without any encumbrance.
+*/
+
+#ifndef R2I_EDGETPU_ENGINE_H
+#define R2I_EDGETPU_ENGINE_H
+
+#include
+
+namespace r2i {
+namespace edgetpu {
+
+class Engine : public r2i::tflite::Engine {
+ public:
+  Engine ();
+  ~Engine ();
+
+ protected:
+
+  void setupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver)
+  override;
+  void setInterpreterContext() override;
+  float *runInference(std::shared_ptr frame, const int &input,
+                      const int &width, const int &height, const int &channels,
+                      r2i::RuntimeError &error) override;
+};
+
+}
+}
+#endif //R2I_EDGETPU_ENGINE_H
\ No newline at end of file
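Outside the r2inference wrappers, the EdgeTPU wiring introduced by this patch boils down to three libedgetpu calls: registering the custom op with the resolver, opening a device through the EdgeTpuManager singleton, and handing the resulting context to the interpreter. A condensed standalone sketch of the same sequence (the model path is a placeholder and error checking is omitted for brevity):

    #include <edgetpu.h>
    #include <memory>
    #include <tensorflow/lite/interpreter.h>
    #include <tensorflow/lite/kernels/register.h>
    #include <tensorflow/lite/model.h>

    int main () {
      // The model must be compiled for the EdgeTPU (placeholder path).
      auto model = tflite::FlatBufferModel::BuildFromFile ("model_edgetpu.tflite");

      // Make the interpreter aware of the EdgeTPU custom op.
      tflite::ops::builtin::BuiltinOpResolver resolver;
      resolver.AddCustom (edgetpu::kCustomOp, edgetpu::RegisterCustomOp ());

      std::unique_ptr<tflite::Interpreter> interpreter;
      tflite::InterpreterBuilder (*model, resolver) (&interpreter);

      // Open the device and attach its context to the interpreter.
      auto context = edgetpu::EdgeTpuManager::GetSingleton ()->OpenDevice ();
      interpreter->SetExternalContext (kTfLiteEdgeTpuContext, context.get ());
      interpreter->SetNumThreads (1);

      interpreter->AllocateTensors ();
      interpreter->Invoke ();
      return 0;
    }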
From e60731e0d9f99ad40e570fe5eddf03c3c4f39c8f Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 18 May 2020 16:51:53 -0600
Subject: [PATCH 05/29] Fix code standard issues in the TFLite and TPU engines

---
 configure.ac            |  2 +-
 r2i/Makefile.am         | 22 +++++++++++-----------
 r2i/edgetpu/Makefile.am |  8 ++++----
 r2i/edgetpu/engine.cc   | 33 ++++++++++++++-------------------
 r2i/edgetpu/engine.h    | 14 +++++++-------
 r2i/tflite/engine.cc    | 17 +++++++++--------
 r2i/tflite/engine.h     |  8 ++++----
 7 files changed, 50 insertions(+), 54 deletions(-)

diff --git a/configure.ac b/configure.ac
index 0ea90d4b..48e042f9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -84,11 +84,11 @@ examples/r2i/tensorflow/Makefile
 examples/r2i/tensorrt/Makefile
 examples/r2i/tflite/Makefile
 r2i/Makefile
+r2i/edgetpu/Makefile
 r2i/ncsdk/Makefile
 r2i/tensorflow/Makefile
 r2i/tensorrt/Makefile
 r2i/tflite/Makefile
-r2i/edgetpu/Makefile
 r2inference-${RR_PACKAGE_VERSION}.pc:r2inference.pc.in
 tests/Makefile
 tests/acceptance/Makefile

diff --git a/r2i/Makefile.am b/r2i/Makefile.am
index 5cdf58d9..d275106e 100644
--- a/r2i/Makefile.am
+++ b/r2i/Makefile.am
@@ -10,6 +10,10 @@
 
 SUBDIRS =
 
+if HAVE_EDGETPU
+SUBDIRS += edgetpu
+endif
+
 if HAVE_NCSDK
 SUBDIRS += ncsdk
 endif
@@ -26,16 +30,12 @@ if HAVE_TENSORRT
 SUBDIRS += tensorrt
 endif
 
-if HAVE_EDGETPU
-SUBDIRS += edgetpu
-endif
-
 DIST_SUBDIRS = \
+  edgetpu \
   ncsdk \
   tensorflow \
   tensorrt \
-  tflite \
-  edgetpu
+  tflite
 
 lib_LTLIBRARIES = libr2inference-@RR_PACKAGE_VERSION@.la
 
@@ -81,6 +81,11 @@ r2iinclude_HEADERS = \
   runtimeerror.h
 
+if HAVE_EDGETPU
+libr2inference_@RR_PACKAGE_VERSION@_la_LIBADD += \
+  $(top_builddir)/r2i/edgetpu/libedgetpu.la
+endif
+
 if HAVE_NCSDK
 libr2inference_@RR_PACKAGE_VERSION@_la_LIBADD += \
   $(top_builddir)/r2i/ncsdk/libncsdk.la
@@ -100,8 +105,3 @@ if HAVE_TFLITE
 libr2inference_@RR_PACKAGE_VERSION@_la_LIBADD += \
   $(top_builddir)/r2i/tflite/libtflite.la
 endif
-
-if HAVE_EDGETPU
-libr2inference_@RR_PACKAGE_VERSION@_la_LIBADD += \
-  $(top_builddir)/r2i/edgetpu/libedgetpu.la
-endif

diff --git a/r2i/edgetpu/Makefile.am b/r2i/edgetpu/Makefile.am
index 497cf0a1..808ee49a 100644
--- a/r2i/edgetpu/Makefile.am
+++ b/r2i/edgetpu/Makefile.am
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 RidgeRun, LLC (http://www.ridgerun.com)
+# Copyright (C) 2020 RidgeRun, LLC (http://www.ridgerun.com)
 # All Rights Reserved.
 #
 # The contents of this software are proprietary and confidential to RidgeRun,
@@ -25,19 +25,19 @@ libedgetpu_la_CPPFLAGS = \
 libedgetpu_la_CFLAGS = \
   $(RR_CFLAGS) \
   $(TFLITE_CFLAGS) \
-  $(EDGETPU_CFLAGS)  \
+  $(EDGETPU_CFLAGS) \
   $(CODE_COVERAGE_CFLAGS)
 
 libedgetpu_la_CXXFLAGS = \
   $(RR_CXXFLAGS) \
   $(TFLITE_CFLAGS) \
-  $(EDGETPU_CFLAGS)  \
+  $(EDGETPU_CFLAGS) \
   $(CODE_COVERAGE_CXXFLAGS)
 
 libedgetpu_la_LIBADD = \
   $(RR_LIBS) \
   $(TFLITE_LIBS) \
-  $(EDGETPU_LIBS)  \
+  $(EDGETPU_LIBS) \
   $(CODE_COVERAGE_LIBS)
 
 tensorflowliteedgetpuinclude_HEADERS = \

diff --git a/r2i/edgetpu/engine.cc b/r2i/edgetpu/engine.cc
index 1a2050b0..446e5246 100644
--- a/r2i/edgetpu/engine.cc
+++ b/r2i/edgetpu/engine.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2018-2020 RidgeRun, LLC (http://www.ridgerun.com)
+/* Copyright (C) 2020 RidgeRun, LLC (http://www.ridgerun.com)
  * All Rights Reserved.
  *
  * The contents of this software are proprietary and confidential to RidgeRun,
@@ -23,43 +23,39 @@ Engine::Engine () {
   this->allow_fp16 = 0;
 }
 
-Engine::~Engine () {
-  r2i::tflite::Engine::Stop();
-}
-
-void Engine::setupResolver(::tflite::ops::builtin::BuiltinOpResolver
+void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
                            &resolver) {
   resolver.AddCustom(::edgetpu::kCustomOp, ::edgetpu::RegisterCustomOp());
 }
 
-void Engine::setInterpreterContext() {
+void Engine::SetInterpreterContext() {
   std::shared_ptr<::edgetpu::EdgeTpuContext> edgetpu_context =
     ::edgetpu::EdgeTpuManager::GetSingleton()->OpenDevice();
   this->interpreter->SetExternalContext(kTfLiteEdgeTpuContext,
                                         edgetpu_context.get());
 }
 
-float *Engine::runInference(std::shared_ptr frame,
-                            const int &input, const int &width, const int &height, const int &channels,
+float *Engine::RunInference(std::shared_ptr frame,
+                            const int &input, const int size,
                             r2i::RuntimeError &error) {
+  std::unique_ptr output_data;
+
   auto input_tensor = this->interpreter->typed_tensor(input);
-  auto input_data = (float *)frame->GetData();
+  auto input_data = static_cast(frame->GetData());
 
   if (!input_data) {
-    error.Set (RuntimeError::Code::FRAMEWORK_ERROR, "Failed to get image data");
+    error.Set (RuntimeError::Code::WRONG_API_USAGE, "Failed to get image data");
     return nullptr;
   }
 
   // Convert to fixed point
-  std::unique_ptr input_data_fixed(new uint8_t(height * width *
-                                   channels));
-  int index;
-  for (index = 0; index < height * width * channels; index++) {
+  std::unique_ptr input_data_fixed(new uint8_t(size));
+  for (int index = 0; index < size; index++) {
     input_data_fixed.get()[index] = static_cast(input_data[index]);
   }
 
   memcpy(input_tensor, input_data_fixed.get(),
-         height * width * channels * sizeof(uint8_t));
+         size * sizeof(uint8_t));
 
   if (this->interpreter->Invoke() != kTfLiteOk) {
     error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
@@ -69,7 +65,6 @@ float *Engine::runInference(std::shared_ptr frame,
 
   const auto &output_indices = interpreter->outputs();
   const auto *out_tensor = interpreter->tensor(output_indices[0]);
-  std::unique_ptr output_data;
 
   if (out_tensor->type == kTfLiteUInt8) {
     uint8_t *output_data_fixed = interpreter->typed_output_tensor(0);
     TfLiteIntArray *output_dims = this->interpreter->tensor(
                                     output_indices[0])->dims;
 
     // Convert to fixed point
     auto output_size = GetRequiredBufferSize(output_dims);
     output_data = std::unique_ptr(new float(output_size));
-    for (index = 0; index < output_size; index++) {
+    for (int index = 0; index < output_size; index++) {
       output_data.get()[index] = static_cast(output_data_fixed[index]);
     }
   } else if (out_tensor->type == kTfLiteFloat32) {
@@ -95,4 +90,4 @@ float *Engine::runInference(std::shared_ptr frame,
 }
 
 } //namepsace edgetpu
-} //namepsace r2i
\ No newline at end of file
+} //namepsace r2i

diff --git a/r2i/edgetpu/engine.h b/r2i/edgetpu/engine.h
index 0e2baa57..fdc12b8d 100644
--- a/r2i/edgetpu/engine.h
+++ b/r2i/edgetpu/engine.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2018 RidgeRun, LLC (http://www.ridgerun.com)
+/* Copyright (C) 2020 RidgeRun, LLC (http://www.ridgerun.com)
  * All Rights Reserved.
  *
  * The contents of this software are proprietary and confidential to RidgeRun,
@@ -20,18 +20,18 @@ namespace edgetpu {
 class Engine : public r2i::tflite::Engine {
  public:
   Engine ();
-  ~Engine ();
 
  protected:
 
-  void setupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver)
+  void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver)
   override;
-  void setInterpreterContext() override;
-  float *runInference(std::shared_ptr frame, const int &input,
-                      const int &width, const int &height, const int &channels,
+  void SetInterpreterContext() override;
+  float *RunInference(std::shared_ptr frame, const int &input,
+                      const int size,
                       r2i::RuntimeError &error) override;
 };
 
 }
 }
-#endif //R2I_EDGETPU_ENGINE_H
\ No newline at end of file
+
+#endif //R2I_EDGETPU_ENGINE_H

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index cafb75e3..6ebf57ac 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -74,7 +74,7 @@ RuntimeError Engine::Start () {
   if (!this->interpreter) {
     ::tflite::ops::builtin::BuiltinOpResolver resolver;
 
-    this->setupResolver(resolver);
+    this->SetupResolver(resolver);
 
     ::tflite::ErrorReporter *error_reporter = ::tflite::DefaultErrorReporter();
 
@@ -89,7 +89,7 @@ RuntimeError Engine::Start () {
       return error;
     }
 
-    this->setInterpreterContext();
+    this->SetInterpreterContext();
 
     std::shared_ptr<::tflite::Interpreter> tflite_interpreter_shared{std::move(interpreter)};
 
@@ -203,7 +203,8 @@ std::shared_ptr Engine::Predict (std::shared_ptr
     return nullptr;
   }
 
-  auto tensor_data = this->runInference(frame, input, wanted_width, wanted_height,
+  auto tensor_data = this->RunInference(frame, input,
+                                        wanted_width * wanted_height *
                                         wanted_channels,
                                         error);
   if (r2i::RuntimeError::EOK != error.GetCode()) {
     return nullptr;
   }
@@ -221,17 +222,17 @@ Engine::~Engine () {
   this->Stop();
 }
 
-void Engine::setupResolver(::tflite::ops::builtin::BuiltinOpResolver
+void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
                            &/*resolver*/) {
   // No implementation for tflite engine
 }
 
-void Engine::setInterpreterContext() {
+void Engine::SetInterpreterContext() {
   // No implementation for tflite engine
 }
 
-float *Engine::runInference(std::shared_ptr frame,
-                            const int &input, const int &width, const int &height, const int &channels,
+float *Engine::RunInference(std::shared_ptr frame,
+                            const int &input, const int size,
                             r2i::RuntimeError &error) {
   auto input_tensor = this->interpreter->typed_tensor(input);
   auto input_data = (float *)frame->GetData();
@@ -242,7 +243,7 @@ std::shared_ptr Engine::Predict (std::shared_ptr
   }
 
   memcpy(input_tensor, input_data,
-         height * width * channels * sizeof(float));
+         size * sizeof(float));
 
   if (this->interpreter->Invoke() != kTfLiteOk) {
     error.Set (RuntimeError::Code::FRAMEWORK_ERROR,

diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index 9a99ae76..3135d3df 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -54,10 +54,10 @@ class Engine : public IEngine {
   int number_of_threads;
   int allow_fp16;
 
-  virtual void setupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver);
-  virtual void setInterpreterContext();
-  virtual float *runInference(std::shared_ptr frame,
-                              const int &input, const int &width, const int &height, const int &channels,
+  virtual void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver);
+  virtual void SetInterpreterContext();
+  virtual float *RunInference(std::shared_ptr frame,
+                              const int &input, const int size,
                               r2i::RuntimeError &error);
 };
From e843ba8a4aaf5df41c85d0dede327ce6c688ac67 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 18 May 2020 17:17:07 -0600
Subject: [PATCH 06/29] Add framework factory for the EdgeTPU

---
 r2i/edgetpu/Makefile.am         |  6 +++--
 r2i/edgetpu/frameworkfactory.cc | 42 +++++++++++++++++++++++++++++
 r2i/edgetpu/frameworkfactory.h  | 30 +++++++++++++++++
 r2i/frameworks.h                |  6 +++++
 r2i/iframeworkfactory.cc        | 13 ++++++++++
 5 files changed, 95 insertions(+), 2 deletions(-)
 create mode 100644 r2i/edgetpu/frameworkfactory.cc
 create mode 100644 r2i/edgetpu/frameworkfactory.h

diff --git a/r2i/edgetpu/Makefile.am b/r2i/edgetpu/Makefile.am
index 808ee49a..48c378f1 100644
--- a/r2i/edgetpu/Makefile.am
+++ b/r2i/edgetpu/Makefile.am
@@ -16,7 +16,8 @@ noinst_LTLIBRARIES = libedgetpu.la
 # Define a custom area for our headers
 tensorflowliteedgetpuincludedir = @R2IINCLUDEDIR@/r2i/edgetpu
 
 libedgetpu_la_SOURCES = \
-  engine.cc
+  engine.cc \
+  frameworkfactory.cc
 
 libedgetpu_la_CPPFLAGS = \
   $(RR_CPPFLAGS) \
@@ -41,6 +42,7 @@ libedgetpu_la_LIBADD = \
   $(CODE_COVERAGE_LIBS)
 
 tensorflowliteedgetpuinclude_HEADERS = \
-  engine.h
+  engine.h \
+  frameworkfactory.h
 
 endif # HAVE_EDGETPU

diff --git a/r2i/edgetpu/frameworkfactory.cc b/r2i/edgetpu/frameworkfactory.cc
new file mode 100644
index 00000000..48137678
--- /dev/null
+++ b/r2i/edgetpu/frameworkfactory.cc
@@ -0,0 +1,42 @@
+/* Copyright (C) 2020 RidgeRun, LLC (http://www.ridgerun.com)
+ * All Rights Reserved.
+ *
+ * The contents of this software are proprietary and confidential to RidgeRun,
+ * LLC. No part of this program may be photocopied, reproduced or translated
+ * into another programming language without prior written consent of
+ * RidgeRun, LLC. The user is free to modify the source code after obtaining
+ * a software license from RidgeRun. All source code changes must be provided
+ * back to RidgeRun without any encumbrance.
+*/
+
+#include "frameworkfactory.h"
+#include "engine.h"
+
+#include
+
+namespace r2i {
+namespace edgetpu {
+
+std::unique_ptr FrameworkFactory::MakeEngine (
+  RuntimeError &error) {
+  error.Clean ();
+
+  return std::unique_ptr (new Engine);
+}
+
+r2i::FrameworkMeta FrameworkFactory::GetDescription (
+  RuntimeError &error) {
+  const FrameworkMeta meta {
+    .code = r2i::FrameworkCode::EDGETPU,
+    .name = "tensorflow-lite-edgetpu",
+    .description = "Google's TensorFlow Lite with EdgeTPU support",
+    .version = TFLITE_VERSION_STRING
+  };
+
+  error.Clean ();
+
+  return meta;
+}
+
+} // namespace tflite
+} // namespace r2i

diff --git a/r2i/edgetpu/frameworkfactory.h b/r2i/edgetpu/frameworkfactory.h
new file mode 100644
index 00000000..20483654
--- /dev/null
+++ b/r2i/edgetpu/frameworkfactory.h
@@ -0,0 +1,30 @@
+/* Copyright (C) 2020 RidgeRun, LLC (http://www.ridgerun.com)
+ * All Rights Reserved.
+ *
+ * The contents of this software are proprietary and confidential to RidgeRun,
+ * LLC. No part of this program may be photocopied, reproduced or translated
+ * into another programming language without prior written consent of
+ * RidgeRun, LLC. The user is free to modify the source code after obtaining
+ * a software license from RidgeRun. All source code changes must be provided
+ * back to RidgeRun without any encumbrance.
+*/
+
+#ifndef R2I_EDGETPU_FRAMEWORK_FACTORY_H
+#define R2I_EDGETPU_FRAMEWORK_FACTORY_H
+
+#include
+
+namespace r2i {
+namespace edgetpu {
+
+class FrameworkFactory : public r2i::tflite::FrameworkFactory {
+ public:
+  std::unique_ptr MakeEngine (RuntimeError &error) override;
+
+  r2i::FrameworkMeta GetDescription (RuntimeError &error) override;
+};
+
+} // namespace edgetpu
+} // namespace r2k
+
+#endif //R2I_EDGETPU_FRAMEWORK_FACTORY_H

diff --git a/r2i/frameworks.h b/r2i/frameworks.h
index 8dfc46d7..3000e275 100644
--- a/r2i/frameworks.h
+++ b/r2i/frameworks.h
@@ -21,6 +21,12 @@ namespace r2i {
  * to appropriate hardware.
  */
 enum FrameworkCode {
+
+  /**
+   * Google's EdgeTPU
+   */
+  EDGETPU,
+
   /**
    * Intel Movidius Neural Compute software developer kit
    */

diff --git a/r2i/iframeworkfactory.cc b/r2i/iframeworkfactory.cc
index 9876bdcc..7f330b3a 100644
--- a/r2i/iframeworkfactory.cc
+++ b/r2i/iframeworkfactory.cc
@@ -15,6 +15,7 @@
 #include
 
 #include "config.h"
+#include "edgetpu/frameworkfactory.h"
 #include "ncsdk/frameworkfactory.h"
 #include "tensorflow/frameworkfactory.h"
 #include "tensorrt/frameworkfactory.h"
@@ -22,6 +23,14 @@
 
 namespace r2i {
 
+#ifdef HAVE_EDGETPU
+static std::unique_ptr
+MakeEdgeTPUFactory (RuntimeError &error) {
+  return std::unique_ptr (new
+                          edgetpu::FrameworkFactory);
+}
+#endif // HAVE_EDGETPU
+
 #ifdef HAVE_NCSDK
 static std::unique_ptr
 MakeNcsdkFactory (RuntimeError &error) {
@@ -57,6 +66,10 @@ typedef std::function(RuntimeError &)> MakeFactory;
 
 const std::unordered_map frameworks ({
+
+#ifdef HAVE_EDGETPU
+  {FrameworkCode::EDGETPU, MakeEdgeTPUFactory},
+#endif //HAVE_EDGETPU
+
 #ifdef HAVE_NCSDK
   {FrameworkCode::NCSDK, MakeNcsdkFactory},
 #endif //HAVE_NCSDK
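With the factory and the EDGETPU framework code registered, a client can pick the backend at run time instead of linking against a specific engine. A short usage sketch, assuming r2inference's usual IFrameworkFactory::MakeFactory entry point and public r2i/r2i.h header:

    #include <r2i/r2i.h>

    int main () {
      r2i::RuntimeError error;

      // Look up the EdgeTPU factory registered in iframeworkfactory.cc.
      auto factory = r2i::IFrameworkFactory::MakeFactory (
                       r2i::FrameworkCode::EDGETPU, error);
      if (r2i::RuntimeError::EOK != error.GetCode ()) {
        return 1;
      }

      auto meta = factory->GetDescription (error);  // framework name and version
      auto engine = factory->MakeEngine (error);    // an r2i::edgetpu::Engine
      return 0;
    }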
From b1f534d1e9c1c82210492d48b13938cf60b08591 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 18 May 2020 18:06:34 -0600
Subject: [PATCH 07/29] Add meson support to the EdgeTPU backend

---
 meson.build             | 15 +++++++++++++++
 meson_options.txt       |  2 ++
 r2i/edgetpu/meson.build | 23 +++++++++++++++++++++++
 r2i/meson.build         |  9 +++++++++
 4 files changed, 49 insertions(+)
 create mode 100644 r2i/edgetpu/meson.build

diff --git a/meson.build b/meson.build
index 55f57264..57feb363 100644
--- a/meson.build
+++ b/meson.build
@@ -44,6 +44,21 @@ if get_option('enable-tflite')
   cdata.set('HAVE_TFLITE', 1)
 endif
 
+# Define library dependencies for Tensorflow Lite support
+if get_option('enable-edgetpu')
+  if tensorflow_lite_dep.found()
+    edgetpu = cpp.find_library(':libedgetpu.so.1.0', required: true)
+    edgetpu_dep = declare_dependency(dependencies: edgetpu)
+    thread_dep = dependency('threads')
+    dl_dep = cpp.find_library('dl', required : true)
+    rt_dep = cpp.find_library('rt', required : true)
+    lib_edgetpu_dep = [edgetpu_dep, lib_tflite_dep, dl_dep, rt_dep]
+    cdata.set('HAVE_EDGETPU', 1)
+  else
+    error('The EdgeTPU needs to have enable the TFlite backend.')
+  endif
+endif
+
 # Define library dependencies for TensorRT support
 if get_option('enable-tensorrt')

diff --git a/meson_options.txt b/meson_options.txt
index aa79c9df..b0b0a240 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -6,6 +6,8 @@ option('enable-examples', type : 'feature', value : 'enabled', yield : true, des
 option('enable-docs', type : 'feature', value : 'enabled', yield : true, description : 'Build documentation with Doxygen')
 
 # Enable support for different backends
+option('enable-edgetpu', type : 'boolean', value: false,
+       description : 'Enable EdgeTPU backend support')
 option('enable-tensorflow', type : 'boolean', value: false,
        description : 'Enable Tensorflow backend support')
 option('enable-tflite', type : 'boolean', value: false,

diff --git a/r2i/edgetpu/meson.build b/r2i/edgetpu/meson.build
new file mode 100644
index 00000000..a013e5b7
--- /dev/null
+++ b/r2i/edgetpu/meson.build
@@ -0,0 +1,23 @@
+# Define source code
+edgetpu_sources = [
+  'engine.cc',
+  'frameworkfactory.cc',
+]
+
+edgetpu_headers = [
+  'engine.h',
+  'frameworkfactory.h',
+]
+
+# Build library
+edgetpu_lib = static_library('edgetpu',
+  edgetpu_sources,
+  include_directories : [configinc],
+  dependencies : [lib_edgetpu_dep],
+)
+
+# Install library header files
+install_headers(edgetpu_headers, subdir : inc_install_dir + '/r2i/edgetpu')
+
+# Define the library as an internal dependency to the current build
+internal_edgetpu_dep = declare_dependency(link_with: edgetpu_lib, dependencies: lib_edgetpu_dep)

diff --git a/r2i/meson.build b/r2i/meson.build
index 9869da37..8082c1dc 100644
--- a/r2i/meson.build
+++ b/r2i/meson.build
@@ -10,6 +10,15 @@ if get_option('enable-tflite')
   r2inference_internal_dep += [internal_tflite_dep]
 endif
 
+if get_option('enable-edgetpu')
+  if internal_tflite_dep.found()
+    subdir('edgetpu')
+    r2inference_internal_dep += [internal_edgetpu_dep]
+  else
+    error('The EdgeTPU needs to have enable the TFlite backend.')
+  endif
+endif
+
 if get_option('enable-tensorrt')
   subdir('tensorrt')
   r2inference_internal_dep += [internal_trt_dep]
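With these options in place the backend is opt-in at configure time: a meson build needs both flags enabled, for example "meson build -Denable-tflite=true -Denable-edgetpu=true", mirroring the TFLITE/EDGETPU coupling that the autotools path enforces through AM_COND_IF in configure.ac.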
From d1d2ffad3ac3b3d114bf8d0de1d0473071ad5d83 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Tue, 19 May 2020 09:05:49 -0600
Subject: [PATCH 08/29] Abstract the inference of the tflite engine

---
 r2i/edgetpu/engine.cc | 55 +----------------------------
 r2i/edgetpu/engine.h  |  3 --
 r2i/tflite/engine.cc  | 82 ++++++++++++++++++++++++++++++++-----------
 r2i/tflite/engine.h   | 10 ++++--
 4 files changed, 73 insertions(+), 77 deletions(-)

diff --git a/r2i/edgetpu/engine.cc b/r2i/edgetpu/engine.cc
index 446e5246..c477dd5f 100644
--- a/r2i/edgetpu/engine.cc
+++ b/r2i/edgetpu/engine.cc
@@ -21,6 +21,7 @@ Engine::Engine () {
   this->model = nullptr;
   this->number_of_threads = 1;
   this->allow_fp16 = 0;
+  this->allow_quantized_models = true;
 }
 
 void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
@@ -35,59 +36,5 @@ void Engine::SetInterpreterContext() {
                                         edgetpu_context.get());
 }
 
-float *Engine::RunInference(std::shared_ptr frame,
-                            const int &input, const int size,
-                            r2i::RuntimeError &error) {
-  std::unique_ptr output_data;
-
-  auto input_tensor = this->interpreter->typed_tensor(input);
-  auto input_data = static_cast(frame->GetData());
-
-  if (!input_data) {
-    error.Set (RuntimeError::Code::WRONG_API_USAGE, "Failed to get image data");
-    return nullptr;
-  }
-
-  // Convert to fixed point
-  std::unique_ptr input_data_fixed(new uint8_t(size));
-  for (int index = 0; index < size; index++) {
-    input_data_fixed.get()[index] = static_cast(input_data[index]);
-  }
-
-  memcpy(input_tensor, input_data_fixed.get(),
-         size * sizeof(uint8_t));
-
-  if (this->interpreter->Invoke() != kTfLiteOk) {
-    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
-               "Failed to invoke tflite!");
-    return nullptr;
-  }
-
-  const auto &output_indices = interpreter->outputs();
-  const auto *out_tensor = interpreter->tensor(output_indices[0]);
-
-  if (out_tensor->type == kTfLiteUInt8) {
-    uint8_t *output_data_fixed = interpreter->typed_output_tensor(0);
-    TfLiteIntArray *output_dims = this->interpreter->tensor(
-                                    output_indices[0])->dims;
-
-    // Convert to fixed point
-    auto output_size = GetRequiredBufferSize(output_dims);
-    output_data = std::unique_ptr(new float(output_size));
-    for (int index = 0; index < output_size; index++) {
-      output_data.get()[index] = static_cast(output_data_fixed[index]);
-    }
-  } else if (out_tensor->type == kTfLiteFloat32) {
-    output_data = std::unique_ptr(interpreter->typed_output_tensor
-                                  (0));
-  } else {
-    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
-               "Output tensor has unsupported output type");
-    return nullptr;
-  }
-
-  return output_data.get();
-}
-
 } //namepsace edgetpu
 } //namepsace r2i

diff --git a/r2i/edgetpu/engine.h b/r2i/edgetpu/engine.h
index fdc12b8d..b5d6cb2b 100644
--- a/r2i/edgetpu/engine.h
+++ b/r2i/edgetpu/engine.h
@@ -26,9 +26,6 @@ class Engine : public r2i::tflite::Engine {
   void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver)
   override;
   void SetInterpreterContext() override;
-  float *RunInference(std::shared_ptr frame, const int &input,
-                      const int size,
-                      r2i::RuntimeError &error) override;
 };
 
 }

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index 6ebf57ac..0f831f5b 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -22,6 +22,7 @@ namespace tflite {
 Engine::Engine () : state(State::STOPPED), model(nullptr) {
   this->number_of_threads = 0;
   this->allow_fp16 = 0;
+  this->allow_quantized_models = false;
 }
 
 RuntimeError Engine::SetModel (std::shared_ptr in_model) {
@@ -197,15 +198,26 @@ std::shared_ptr Engine::Predict (std::shared_ptr
   }
 
   // Check the model quantization, only 32 bits allowed
-  if (kTfLiteFloat32 != interpreter->tensor(input)->type) {
+  if (kTfLiteFloat32 != interpreter->tensor(input)->type
+      && !this->allow_quantized_models) {
     error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
               "The provided model quantization is not allowed, only float32 is supported");
     return nullptr;
   }
 
-  auto tensor_data = this->RunInference(frame, input,
-                                        wanted_width * wanted_height *
-                                        wanted_channels, error);
+  this->PreprocessInputData(static_cast(frame->GetData()),
+                            wanted_width * wanted_height * wanted_channels, error);
+  if (r2i::RuntimeError::EOK != error.GetCode()) {
+    return nullptr;
+  }
+
+  if (this->interpreter->Invoke() != kTfLiteOk) {
+    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
+               "Failed to invoke tflite!");
+    return nullptr;
+  }
+
+  auto tensor_data = this->GetOutputTensorData(error);
   if (r2i::RuntimeError::EOK != error.GetCode()) {
     return nullptr;
   }
@@ -231,27 +243,63 @@ void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
 
-void Engine::SetInterpreterContext() {
+void Engine::SetInterpreterContext() {
   // No implementation for tflite engine
 }
 
-float *Engine::RunInference(std::shared_ptr frame,
-                            const int &input, const int size,
-                            r2i::RuntimeError &error) {
-  auto input_tensor = this->interpreter->typed_tensor(input);
-  auto input_data = (float *)frame->GetData();
+void Engine::PreprocessInputData(const float *input_data, const int size,
+                                 r2i::RuntimeError &error) {
+  const auto &input_indices = interpreter->inputs();
+  const auto *tensor = interpreter->tensor(input_indices[0]);
 
   if (!input_data) {
-    error.Set (RuntimeError::Code::FRAMEWORK_ERROR, "Failed to get image data");
-    return nullptr;
+    error.Set (RuntimeError::Code::WRONG_API_USAGE, "Failed to get image data");
+    return;
   }
 
-  memcpy(input_tensor, input_data,
-         size * sizeof(float));
+  if (kTfLiteUInt8 == tensor->type) {
+    auto input_fixed_tensor = this->interpreter->typed_tensor
+                              (input_indices[0]);
 
-  if (this->interpreter->Invoke() != kTfLiteOk) {
-    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
-               "Failed to invoke tflite!");
+    // Convert to fixed point
+    std::unique_ptr input_data_fixed(new uint8_t(size));
+    for (int index = 0; index < size; index++) {
+      input_data_fixed.get()[index] = static_cast(input_data[index]);
+    }
+
+    memcpy(input_fixed_tensor, input_data_fixed.get(), size * sizeof(uint8_t));
+  } else if (kTfLiteFloat32 == tensor->type) {
+    auto input_tensor = this->interpreter->typed_tensor(input_indices[0]);
+
+    memcpy(input_tensor, input_data, size * sizeof(float));
+  } else {
+    error.Set (RuntimeError::Code::WRONG_API_USAGE,
+               "Output tensor has unsupported output type");
+    return;
+  }
+}
+
+float *Engine::GetOutputTensorData(r2i::RuntimeError &error) {
+  float *output_data = nullptr;
+  const auto &output_indices = interpreter->outputs();
+  const auto *tensor = interpreter->tensor(output_indices[0]);
+
+  if (kTfLiteUInt8 == tensor->type) {
+    uint8_t *output_data_fixed = interpreter->typed_output_tensor(0);
+    TfLiteIntArray *output_dims = this->interpreter->tensor(
+                                    output_indices[0])->dims;
+
+    // Convert to floating point
+    auto output_size = GetRequiredBufferSize(output_dims);
+    output_data = (float *)malloc(output_size * sizeof(float));
+    for (int index = 0; index < output_size; index++) {
+      output_data[index] = static_cast(output_data_fixed[index]);
+    }
+  } else if (kTfLiteFloat32 == tensor->type) {
+    output_data = interpreter->typed_output_tensor(0);
+  } else {
+    error.Set (RuntimeError::Code::WRONG_API_USAGE,
+               "Output tensor has unsupported output type");
     return nullptr;
   }
 
-  return this->interpreter->typed_output_tensor(0);
+  return output_data;
 }

diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index 3135d3df..0aa9a21b 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -53,12 +53,16 @@ class Engine : public IEngine {
   std::shared_ptr model;
   int number_of_threads;
   int allow_fp16;
+  bool allow_quantized_models;
 
   virtual void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver);
   virtual void SetInterpreterContext();
-  virtual float *RunInference(std::shared_ptr frame,
-                              const int &input, const int size,
-                              r2i::RuntimeError &error);
+
+ private:
+  void PreprocessInputData(const float *input_data, const int size,
+                           r2i::RuntimeError &error);
+  float *GetOutputTensorData(r2i::RuntimeError &error);
+
 };
From 0dae7d9e192f9523cc19544aed8a02ade055b08c Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Tue, 19 May 2020 17:04:26 -0600
Subject: [PATCH 09/29] Fix error message of the TPU backend in meson and autotools

---
 configure.ac      | 2 +-
 meson.build       | 2 +-
 meson_options.txt | 2 +-
 r2i/meson.build   | 8 ++------
 4 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/configure.ac b/configure.ac
index 48e042f9..30e415c8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -59,7 +59,7 @@ RR_CHECK_FEATURE_LIB(TFLITE, TensorFlow lite Installation,
 # add specific LIBS for TFLITE
 AC_SUBST([TFLITE_LIBS], ["$TFLITE_LIBS -pthread -ldl -lrt"])
 
-AM_COND_IF([HAVE_EDGETPU], [AM_COND_IF([HAVE_TFLITE], [], [AC_MSG_ERROR(The EdgeTPU backend needs TFLITE enable)])], [])
+AM_COND_IF([HAVE_EDGETPU], [AM_COND_IF([HAVE_TFLITE], [], [AC_MSG_ERROR(The EdgeTPU backend needs TFLITE enabled as well)])], [])
 
 RR_CHECK_FEATURE_LIB(TENSORRT, TensorRT Installation, nvinfer,
   createInferBuilder_INTERNAL, NvInfer.h, no)

diff --git a/meson.build b/meson.build
index 57feb363..4ee72e87 100644
--- a/meson.build
+++ b/meson.build
@@ -55,7 +55,7 @@ if get_option('enable-edgetpu')
     lib_edgetpu_dep = [edgetpu_dep, lib_tflite_dep, dl_dep, rt_dep]
     cdata.set('HAVE_EDGETPU', 1)
   else
-    error('The EdgeTPU needs to have enable the TFlite backend.')
+    error('The EdgeTPU needs to have the TFLite backend enabled as well.')
   endif
 endif

diff --git a/meson_options.txt b/meson_options.txt
index b0b0a240..c9fd6c80 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -7,7 +7,7 @@
 # Enable support for different backends
 option('enable-edgetpu', type : 'boolean', value: false,
-       description : 'Enable EdgeTPU backend support')
+       description : 'Enable EdgeTPU backend support. This backend needs to have the TFLite backend enabled.')
 option('enable-tensorflow', type : 'boolean', value: false,
        description : 'Enable Tensorflow backend support')
 option('enable-tflite', type : 'boolean', value: false,

diff --git a/r2i/meson.build b/r2i/meson.build
index 8082c1dc..ac559e42 100644
--- a/r2i/meson.build
+++ b/r2i/meson.build
@@ -11,12 +11,8 @@ endif
 
 if get_option('enable-edgetpu')
-  if internal_tflite_dep.found()
-    subdir('edgetpu')
-    r2inference_internal_dep += [internal_edgetpu_dep]
-  else
-    error('The EdgeTPU needs to have enable the TFlite backend.')
-  endif
+  subdir('edgetpu')
+  r2inference_internal_dep += [internal_edgetpu_dep]
 endif
From 386c77dbc6c8c6499f20655e424144bd9ce4c83c Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Tue, 19 May 2020 17:07:24 -0600
Subject: [PATCH 10/29] Fix code standard issues in the tpu and tflite engines

---
 r2i/edgetpu/engine.cc           | 16 +++++---------
 r2i/edgetpu/engine.h            |  7 ++++++-
 r2i/edgetpu/frameworkfactory.cc |  5 +++--
 r2i/tflite/engine.cc            | 37 +++++++++++++++++++------------
 r2i/tflite/engine.h             |  6 ++++--
 5 files changed, 42 insertions(+), 29 deletions(-)

diff --git a/r2i/edgetpu/engine.cc b/r2i/edgetpu/engine.cc
index c477dd5f..dd3f6a31 100644
--- a/r2i/edgetpu/engine.cc
+++ b/r2i/edgetpu/engine.cc
@@ -11,17 +11,11 @@
 
 #include "r2i/edgetpu/engine.h"
 
-#include
-
 namespace r2i {
 namespace edgetpu {
 
-Engine::Engine () {
-  this->state = r2i::tflite::Engine::State::STOPPED;
-  this->model = nullptr;
+Engine::Engine () : tflite::Engine() {
   this->number_of_threads = 1;
-  this->allow_fp16 = 0;
-  this->allow_quantized_models = true;
 }
 
 void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
@@ -29,11 +23,11 @@ void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
   resolver.AddCustom(::edgetpu::kCustomOp, ::edgetpu::RegisterCustomOp());
 }
 
-void Engine::SetInterpreterContext() {
-  std::shared_ptr<::edgetpu::EdgeTpuContext> edgetpu_context =
-    ::edgetpu::EdgeTpuManager::GetSingleton()->OpenDevice();
+void Engine::SetInterpreterContext(std::shared_ptr<::tflite::Interpreter>
+                                   interpreter) {
+  this->edgetpu_context = ::edgetpu::EdgeTpuManager::GetSingleton()->OpenDevice();
   this->interpreter->SetExternalContext(kTfLiteEdgeTpuContext,
-                                        edgetpu_context.get());
+                                        this->edgetpu_context.get());
 }
 
 } //namepsace edgetpu

diff --git a/r2i/edgetpu/engine.h b/r2i/edgetpu/engine.h
index b5d6cb2b..557866c7 100644
--- a/r2i/edgetpu/engine.h
+++ b/r2i/edgetpu/engine.h
@@ -12,6 +12,7 @@
 #ifndef R2I_EDGETPU_ENGINE_H
 #define R2I_EDGETPU_ENGINE_H
 
+#include
 #include
 
 namespace r2i {
@@ -25,7 +26,11 @@ class Engine : public r2i::tflite::Engine {
 
   void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver)
   override;
-  void SetInterpreterContext() override;
+  void SetInterpreterContext(std::shared_ptr<::tflite::Interpreter> interpreter)
+  override;
+
+ private:
+  std::shared_ptr<::edgetpu::EdgeTpuContext> edgetpu_context;
 };
 
 }

diff --git a/r2i/edgetpu/frameworkfactory.cc b/r2i/edgetpu/frameworkfactory.cc
index 48137678..b0f8e918 100644
--- a/r2i/edgetpu/frameworkfactory.cc
+++ b/r2i/edgetpu/frameworkfactory.cc
@@ -12,6 +12,7 @@
 #include "frameworkfactory.h"
 #include "engine.h"
 
+#include
 #include
 
 namespace r2i {
 namespace edgetpu {
@@ -28,9 +29,9 @@ r2i::FrameworkMeta FrameworkFactory::GetDescription (
   RuntimeError &error) {
   const FrameworkMeta meta {
     .code = r2i::FrameworkCode::EDGETPU,
-    .name = "tensorflow-lite-edgetpu",
+    .name = "edgetpu",
     .description = "Google's TensorFlow Lite with EdgeTPU support",
-    .version = TFLITE_VERSION_STRING
+    .version = ::edgetpu::EdgeTpuManager::GetSingleton()->Version()
   };
 
   error.Clean ();
 
   return meta;

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index 0f831f5b..dbcf595e 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -22,7 +22,6 @@ namespace tflite {
 Engine::Engine () : state(State::STOPPED), model(nullptr) {
   this->number_of_threads = 0;
   this->allow_fp16 = 0;
-  this->allow_quantized_models = false;
 }
 
 RuntimeError Engine::SetModel (std::shared_ptr in_model) {
@@ -90,7 +89,7 @@ RuntimeError Engine::Start () {
       return error;
     }
 
-    this->SetInterpreterContext();
+    this->SetInterpreterContext(this->interpreter);
 
     std::shared_ptr<::tflite::Interpreter> tflite_interpreter_shared{std::move(interpreter)};
 
@@ -197,14 +196,6 @@ std::shared_ptr Engine::Predict (std::shared_ptr
     return nullptr;
   }
 
-  // Check the model quantization, only 32 bits allowed
-  if (kTfLiteFloat32 != interpreter->tensor(input)->type
-      && !this->allow_quantized_models) {
-    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
-               "The provided model quantization is not allowed, only float32 is supported");
-    return nullptr;
-  }
-
   this->PreprocessInputData(static_cast(frame->GetData()),
                             wanted_width * wanted_height * wanted_channels, error);
   if (r2i::RuntimeError::EOK != error.GetCode()) {
     return nullptr;
@@ -239,7 +230,8 @@ void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
   // No implementation for tflite engine
 }
 
-void Engine::SetInterpreterContext() {
+void Engine::SetInterpreterContext(
+  std::shared_ptr<::tflite::Interpreter> /*interpreter*/) {
   // No implementation for tflite engine
 }
 
@@ -260,7 +252,8 @@ void Engine::PreprocessInputData(const float *input_data, const int size,
     // Convert to fixed point
     std::unique_ptr input_data_fixed(new uint8_t(size));
     for (int index = 0; index < size; index++) {
-      input_data_fixed.get()[index] = static_cast(input_data[index]);
+      input_data_fixed.get()[index] = this->ConvertToFixedPoint(tensor,
+                                      input_data[index]);
     }
 
     memcpy(input_fixed_tensor, input_data_fixed.get(), size * sizeof(uint8_t));
@@ -289,7 +282,8 @@ float *Engine::GetOutputTensorData(r2i::RuntimeError &error) {
     auto output_size = GetRequiredBufferSize(output_dims);
     output_data = (float *)malloc(output_size * sizeof(float));
     for (int index = 0; index < output_size; index++) {
-      output_data[index] = static_cast(output_data_fixed[index]);
+      output_data[index] = this->ConvertToFloatingPoint(tensor,
+                           output_data_fixed[index]);
     }
   } else if (kTfLiteFloat32 == tensor->type) {
     output_data = interpreter->typed_output_tensor(0);
@@ -302,5 +296,22 @@ float *Engine::GetOutputTensorData(r2i::RuntimeError &error) {
   return output_data;
 }
 
+uint8_t Engine::ConvertToFixedPoint(const TfLiteTensor *tensor, float value) {
+  auto zero_point = tensor->params.zero_point;
+  auto scale = tensor->params.scale;
+
+  float result = (value / scale) + zero_point;
+  return static_cast(result);
+}
+
+float Engine::ConvertToFloatingPoint(const TfLiteTensor *tensor,
+                                     uint8_t value) {
+  auto zero_point = tensor->params.zero_point;
+  auto scale = tensor->params.scale;
+
+  uint8_t result = (static_cast(value) + zero_point) * scale;
+  return result;
+}
+
 } //namespace tflite

diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index 0aa9a21b..342561ad 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -53,15 +53,17 @@ class Engine : public IEngine {
   std::shared_ptr model;
   int number_of_threads;
   int allow_fp16;
-  bool allow_quantized_models;
 
   virtual void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver);
-  virtual void SetInterpreterContext();
+  virtual void SetInterpreterContext(std::shared_ptr<::tflite::Interpreter>
+                                     interpreter);
 
  private:
   void PreprocessInputData(const float *input_data, const int size,
                            r2i::RuntimeError &error);
   float *GetOutputTensorData(r2i::RuntimeError &error);
+  uint8_t ConvertToFixedPoint(const TfLiteTensor *tensor, float value);
+  float ConvertToFloatingPoint(const TfLiteTensor *tensor, uint8_t value);
 
 };
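The two helpers introduced here implement TensorFlow Lite's standard affine quantization scheme, in which every quantized tensor carries a scale and a zero_point: q = value / scale + zero_point on the way in, and value = (q - zero_point) * scale on the way out. A worked example with illustrative parameters (note that dequantization subtracts the zero point before scaling, the form PATCH 12 below settles on):

    #include <cstdint>
    #include <cstdio>

    int main () {
      const float scale = 0.00784f;  // illustrative params of a uint8 tensor
      const int zero_point = 128;

      float value = 0.25f;
      // Quantize: 0.25 / 0.00784 + 128 ~= 159
      uint8_t q = static_cast<uint8_t> (value / scale + zero_point);
      // Dequantize: (159 - 128) * 0.00784 ~= 0.243
      float back = (static_cast<int> (q) - zero_point) * scale;

      printf ("quantized: %u, dequantized: %f\n", (unsigned) q, back);
      return 0;
    }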
From 4e2b51bdc6af4fcf01d031f9e9d1952a13ed8706 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Thu, 21 May 2020 00:18:55 +0000
Subject: [PATCH 11/29] Fix inference issues in EdgeTPU engine WIP

---
 r2i/edgetpu/engine.cc | 15 +++++++++++----
 r2i/edgetpu/engine.h  |  3 ++-
 r2i/tflite/engine.cc  | 34 +++++++++++++++++++---------------
 r2i/tflite/engine.h   |  8 ++++----
 4 files changed, 36 insertions(+), 24 deletions(-)

diff --git a/r2i/edgetpu/engine.cc b/r2i/edgetpu/engine.cc
index dd3f6a31..b56d47c1 100644
--- a/r2i/edgetpu/engine.cc
+++ b/r2i/edgetpu/engine.cc
@@ -16,6 +16,7 @@ namespace edgetpu {
 
 Engine::Engine () : tflite::Engine() {
   this->number_of_threads = 1;
+  this->edgetpu_context = nullptr;
 }
 
 void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
@@ -23,11 +24,17 @@ void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
   resolver.AddCustom(::edgetpu::kCustomOp, ::edgetpu::RegisterCustomOp());
 }
 
-void Engine::SetInterpreterContext(std::shared_ptr<::tflite::Interpreter>
-                                   interpreter) {
+void Engine::SetInterpreterContext(::tflite::Interpreter *interpreter) {
   this->edgetpu_context = ::edgetpu::EdgeTpuManager::GetSingleton()->OpenDevice();
-  this->interpreter->SetExternalContext(kTfLiteEdgeTpuContext,
-                                        this->edgetpu_context.get());
+
+  interpreter->SetExternalContext(kTfLiteEdgeTpuContext,
+                                  this->edgetpu_context.get());
+}
+
+Engine::~Engine() {
+  this->Stop();
+  this->interpreter.reset();
+  this->edgetpu_context.reset();
 }
 
 } //namepsace edgetpu

diff --git a/r2i/edgetpu/engine.h b/r2i/edgetpu/engine.h
index 557866c7..ec4f0a62 100644
--- a/r2i/edgetpu/engine.h
+++ b/r2i/edgetpu/engine.h
@@ -21,12 +21,13 @@ namespace edgetpu {
 class Engine : public r2i::tflite::Engine {
  public:
   Engine ();
+  ~Engine ();
 
  protected:
 
   void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver)
   override;
-  void SetInterpreterContext(std::shared_ptr<::tflite::Interpreter> interpreter)
+  void SetInterpreterContext(::tflite::Interpreter *interpreter)
   override;
 
  private:

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index dbcf595e..8b8e6309 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -11,10 +11,13 @@
 
 #include "r2i/tflite/engine.h"
 
+#include
+
 #include "r2i/tflite/prediction.h"
 #include "r2i/tflite/frame.h"
 #include
 #include
+#include
 
 namespace r2i {
 namespace tflite {
@@ -89,8 +92,6 @@ RuntimeError Engine::Start () {
       return error;
     }
 
-    this->SetInterpreterContext(this->interpreter);
-
     std::shared_ptr<::tflite::Interpreter> tflite_interpreter_shared{std::move(interpreter)};
 
     this->interpreter = tflite_interpreter_shared;
@@ -169,6 +170,8 @@ std::shared_ptr Engine::Predict (std::shared_ptr
     return nullptr;
   }
 
+  this->SetInterpreterContext(this->interpreter.get());
+
   if (this->number_of_threads > 0) {
     interpreter->SetNumThreads(this->number_of_threads);
   }
@@ -197,7 +200,7 @@ std::shared_ptr Engine::Predict (std::shared_ptr
   }
 
   this->PreprocessInputData(static_cast(frame->GetData()),
-                            wanted_width * wanted_height * wanted_channels, error);
+                            wanted_width * wanted_height * wanted_channels, this->interpreter.get(), error);
   if (r2i::RuntimeError::EOK != error.GetCode()) {
     return nullptr;
   }
@@ -208,7 +211,7 @@ std::shared_ptr Engine::Predict (std::shared_ptr
     return nullptr;
   }
 
-  auto tensor_data = this->GetOutputTensorData(error);
+  auto tensor_data = this->GetOutputTensorData(this->interpreter.get(), error);
   if (r2i::RuntimeError::EOK != error.GetCode()) {
     return nullptr;
   }
@@ -223,6 +226,7 @@ std::shared_ptr Engine::Predict (std::shared_ptr
 
 Engine::~Engine () {
   this->Stop();
+  this->interpreter.reset();
 }
 
 void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
@@ -230,13 +234,12 @@ void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
   // No implementation for tflite engine
 }
 
-void Engine::SetInterpreterContext(
-  std::shared_ptr<::tflite::Interpreter> /*interpreter*/) {
+void Engine::SetInterpreterContext(::tflite::Interpreter */*interpreter*/) {
   // No implementation for tflite engine
 }
 
 void Engine::PreprocessInputData(const float *input_data, const int size,
-                                 r2i::RuntimeError &error) {
+                                 ::tflite::Interpreter *interpreter, r2i::RuntimeError &error) {
   const auto &input_indices = interpreter->inputs();
   const auto *tensor = interpreter->tensor(input_indices[0]);
 
@@ -246,19 +249,18 @@ void Engine::PreprocessInputData(const float *input_data, const int size,
   }
 
   if (kTfLiteUInt8 == tensor->type) {
-    auto input_fixed_tensor = this->interpreter->typed_tensor
-                              (input_indices[0]);
+    auto *input_fixed_tensor = interpreter->typed_input_tensor(0);
 
     // Convert to fixed point
-    std::unique_ptr input_data_fixed(new uint8_t(size));
+    std::vector input_data_fixed;
     for (int index = 0; index < size; index++) {
-      input_data_fixed.get()[index] = this->ConvertToFixedPoint(tensor,
-                                      input_data[index]);
+      input_data_fixed.push_back(this->ConvertToFixedPoint(tensor,
+                                 input_data[index]));
     }
 
-    memcpy(input_fixed_tensor, input_data_fixed.get(), size * sizeof(uint8_t));
+    memcpy(input_fixed_tensor, input_data_fixed.data(), size * sizeof(uint8_t));
   } else if (kTfLiteFloat32 == tensor->type) {
-    auto input_tensor = this->interpreter->typed_tensor(input_indices[0]);
+    auto input_tensor = interpreter->typed_tensor(input_indices[0]);
 
     memcpy(input_tensor, input_data, size * sizeof(float));
   } else {
@@ -268,7 +270,8 @@ void Engine::PreprocessInputData(const float *input_data, const int size,
   }
 }
 
-float *Engine::GetOutputTensorData(r2i::RuntimeError &error) {
+float *Engine::GetOutputTensorData(::tflite::Interpreter *interpreter,
+                                   r2i::RuntimeError &error) {
   float *output_data = nullptr;
   const auto &output_indices = interpreter->outputs();
   const auto *tensor = interpreter->tensor(output_indices[0]);
@@ -301,6 +304,7 @@ uint8_t Engine::ConvertToFixedPoint(const TfLiteTensor *tensor, float value) {
   auto scale = tensor->params.scale;
 
   float result = (value / scale) + zero_point;
+
   return static_cast(result);
 }

diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index 342561ad..eb03e101 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -55,13 +55,13 @@ class Engine : public IEngine {
   int allow_fp16;
 
   virtual void SetupResolver(::tflite::ops::builtin::BuiltinOpResolver &resolver);
-  virtual void SetInterpreterContext(std::shared_ptr<::tflite::Interpreter>
-                                     interpreter);
+  virtual void SetInterpreterContext(::tflite::Interpreter *interpreter);
 
  private:
   void PreprocessInputData(const float *input_data, const int size,
-                           r2i::RuntimeError &error);
-  float *GetOutputTensorData(r2i::RuntimeError &error);
+                           ::tflite::Interpreter *interpreter, r2i::RuntimeError &error);
+  float *GetOutputTensorData(::tflite::Interpreter *interpreter,
+                             r2i::RuntimeError &error);
   uint8_t ConvertToFixedPoint(const TfLiteTensor *tensor, float value);
   float ConvertToFloatingPoint(const TfLiteTensor *tensor, uint8_t value);
 
 };
+  const int num_outputs = output_indices.size();
 
-  if (kTfLiteUInt8 == tensor->type) {
-    uint8_t *output_data_fixed = interpreter->typed_output_tensor<uint8_t>(0);
-    TfLiteIntArray *output_dims = this->interpreter->tensor(
-                                    output_indices[0])->dims;
-
-    // Convert to floating point
-    auto output_size = GetRequiredBufferSize(output_dims);
-    output_data = (float *)malloc(output_size * sizeof(float));
-    for (int index = 0; index < output_size; index++) {
-      output_data[index] = this->ConvertToFloatingPoint(tensor,
-                           output_data_fixed[index]);
-    }
-  } else if (kTfLiteFloat32 == tensor->type) {
-    output_data = interpreter->typed_output_tensor<float>(0);
-  } else {
-    error.Set (RuntimeError::Code::WRONG_API_USAGE,
-               "Output tensor has unsupported output type");
-    return nullptr;
-  }
+  int out_idx = 0;
+  for (int index = 0; index < num_outputs; ++index) {
+    const auto *out_tensor = interpreter->tensor(output_indices[index]);
 
-  return output_data;
-}
-
-uint8_t Engine::ConvertToFixedPoint(const TfLiteTensor *tensor, float value) {
-  auto zero_point = tensor->params.zero_point;
-  auto scale = tensor->params.scale;
-
-  float result = (value / scale) + zero_point;
-
-  return static_cast<uint8_t>(result);
-}
+    if (nullptr == out_tensor) {
+      error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
+                 "Output tensor is null");
+      return nullptr;
+    }
 
-float Engine::ConvertToFloatingPoint(const TfLiteTensor *tensor,
-                                     uint8_t value) {
-  auto zero_point = tensor->params.zero_point;
-  auto scale = tensor->params.scale;
+    if (kTfLiteUInt8 == out_tensor->type) {
+
+      const int num_values = out_tensor->bytes;
+      output_data.resize(out_idx + num_values);
+      const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(index);
+
+      for (int value_index = 0; value_index < num_values; ++value_index) {
+        output_data[out_idx++] = (output[value_index] - out_tensor->params.zero_point) *
+                                 out_tensor->params.scale;
+      }
+    } else if (kTfLiteFloat32 == out_tensor->type) {
+
+      const int num_values = out_tensor->bytes / sizeof(float);
+      output_data.resize(out_idx + num_values);
+      const float *output = interpreter->typed_output_tensor<float>(index);
+
+      for (int value_index = 0; value_index < num_values; ++value_index) {
+        output_data[out_idx++] = output[value_index];
+      }
+    } else {
+      error.Set (RuntimeError::Code::WRONG_API_USAGE,
+                 "Output tensor has unsupported output type");
+      return nullptr;
+    }
   }
 
-  uint8_t result = (static_cast<float>(value) + zero_point) * scale;
-  return result;
+  output_size = output_data.size();
+  return output_data.data();
 }
 
 } //namespace tflite
diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index eb03e101..c1a49ebf 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -17,6 +17,7 @@
 #include
 #include
+#include
 #include
 
 namespace r2i {
@@ -60,10 +61,8 @@ class Engine : public IEngine {
  private:
   void PreprocessInputData(const float *input_data, const int size,
                            ::tflite::Interpreter *interpreter, r2i::RuntimeError &error);
-  float *GetOutputTensorData(::tflite::Interpreter *interpreter,
+  float *GetOutputTensorData(::tflite::Interpreter *interpreter, int &output_size,
                              r2i::RuntimeError &error);
-  uint8_t ConvertToFixedPoint(const TfLiteTensor *tensor, float value);
-  float ConvertToFloatingPoint(const TfLiteTensor *tensor, uint8_t value);
 
 };
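For reference, the uint8 branch above is plain dequantization — real = (quantized - zero_point) * scale — with the element count taken from TfLiteTensor::bytes (one byte per uint8 element). A minimal standalone sketch of that arithmetic, with a helper name of our choosing rather than anything from this series:

#include <cstdint>
#include <vector>

// Dequantize a uint8 tensor buffer: real = (q - zero_point) * scale.
// For a uint8 tensor, `bytes` is also the number of elements.
static std::vector<float> Dequantize (const uint8_t *data, size_t bytes,
                                      float scale, int zero_point) {
  std::vector<float> out (bytes);
  for (size_t i = 0; i < bytes; ++i) {
    out[i] = (data[i] - zero_point) * scale;
  }
  return out;
}

From 6702d20d82345a90594a99112406f8d051a6edd5 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Fri, 22 May 2020 04:35:17 +0000
Subject: [PATCH 13/29] Add edgetpu example

---
 configure.ac             | 1 +
 examples/r2i/Makefile.am | 5 +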
examples/r2i/edgetpu/Makefile.am | 38 ++++++ examples/r2i/edgetpu/inception.cc | 205 ++++++++++++++++++++++++++++++ examples/r2i/edgetpu/meson.build | 11 ++ examples/r2i/meson.build | 4 + 6 files changed, 264 insertions(+) create mode 100644 examples/r2i/edgetpu/Makefile.am create mode 100644 examples/r2i/edgetpu/inception.cc create mode 100644 examples/r2i/edgetpu/meson.build diff --git a/configure.ac b/configure.ac index 30e415c8..8655c153 100644 --- a/configure.ac +++ b/configure.ac @@ -79,6 +79,7 @@ docs/api/Makefile docs/uml/Makefile examples/Makefile examples/r2i/Makefile +examples/r2i/edgetpu/Makefile examples/r2i/ncsdk/Makefile examples/r2i/tensorflow/Makefile examples/r2i/tensorrt/Makefile diff --git a/examples/r2i/Makefile.am b/examples/r2i/Makefile.am index 35deaabe..6d645ae9 100644 --- a/examples/r2i/Makefile.am +++ b/examples/r2i/Makefile.am @@ -11,6 +11,7 @@ AM_DEFAULT_SOURCE_EXT = .cc DIST_SUBDIRS = \ + edgetpu \ ncsdk \ tensorflow \ tflite @@ -18,6 +19,10 @@ SUBDIRS = if ENABLE_EXAMPLES +if HAVE_EDGETPU +SUBDIRS += edgetpu +endif + if HAVE_NCSDK SUBDIRS += ncsdk endif diff --git a/examples/r2i/edgetpu/Makefile.am b/examples/r2i/edgetpu/Makefile.am new file mode 100644 index 00000000..f0c0f36d --- /dev/null +++ b/examples/r2i/edgetpu/Makefile.am @@ -0,0 +1,38 @@ +# Copyright (C) 2018 RidgeRun, LLC (http://www.ridgerun.com) +# All Rights Reserved. +# +# The contents of this software are proprietary and confidential to RidgeRun, +# LLC. No part of this program may be photocopied, reproduced or translated +# into another programming language without prior written consent of +# RidgeRun, LLC. The user is free to modify the source code after obtaining +# a software license from RidgeRun. All source code changes must be provided +# back to RidgeRun without any encumbrance. + +AM_DEFAULT_SOURCE_EXT = .cc + +if ENABLE_EXAMPLES + +noinst_PROGRAMS = \ + inception + +AM_CXXFLAGS = \ + $(RR_CXXFLAGS) \ + $(CODE_COVERAGE_CXXFLAGS) \ + -I../common/ + +AM_CFLAGS = \ + $(RR_CFLAGS) \ + $(CODE_COVERAGE_CFLAGS) + +AM_CPPFLAGS = \ + $(RR_CPPFLAGS) \ + $(CODE_COVERAGE_CPPFLAGS) + +LDADD = \ + $(RR_LIBS) \ + $(CODE_COVERAGE_LIBS) \ + $(TFLITE_LIBS) \ + $(EDGETPU_LIBS) \ + $(top_builddir)/r2i/libr2inference-@RR_PACKAGE_VERSION@.la + +endif # ENABLE_EXAMPLES diff --git a/examples/r2i/edgetpu/inception.cc b/examples/r2i/edgetpu/inception.cc new file mode 100644 index 00000000..7ae90602 --- /dev/null +++ b/examples/r2i/edgetpu/inception.cc @@ -0,0 +1,205 @@ +/* Copyright (C) 2018-2020 RidgeRun, LLC (http://www.ridgerun.com) + * All Rights Reserved. + * + * The contents of this software are proprietary and confidential to RidgeRun, + * LLC. No part of this program may be photocopied, reproduced or translated + * into another programming language without prior written consent of + * RidgeRun, LLC. The user is free to modify the source code after obtaining + * a software license from RidgeRun. All source code changes must be provided + * back to RidgeRun without any encumbrance. 
+*/
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb_image.h"
+
+#define STB_IMAGE_RESIZE_IMPLEMENTATION
+#include "stb_image_resize.h"
+
+void PrintTopPrediction (std::shared_ptr<r2i::IPrediction> prediction) {
+  r2i::RuntimeError error;
+  int num_labels = prediction->GetResultSize();
+
+  std::vector<double> results;
+  results.resize(num_labels);
+
+  for (int i = 0; i < num_labels; ++i) {
+    results[i] = prediction->At(i, error);
+  }
+
+  auto it = std::max_element(results.begin(), results.end());
+  std::cout << "Highest probability is label "
+            << std::distance(results.begin(), it) << " (" << *it << ")"
+            << std::endl;
+
+  /*std::cout << "Highest probability is label "
+            << index << " (" << max << ")" << std::endl;*/
+}
+
+void PrintUsage() {
+  std::cerr << "Required arguments: "
+            << "-i [JPG input_image] "
+            << "-m [Inception TfLite Model] "
+            << "-s [Model Input Size] "
+            << "-I [Input Node] "
+            << "-O [Output Node] \n"
+            << " Example: "
+            << " ./inception -i cat.jpg -m graph_inceptionv2_tensorflow.pb "
+            << "-s 224"
+            << std::endl;
+}
+
+std::unique_ptr<float[]> PreProcessImage (const unsigned char *input,
+    int width, int height, int reqwidth, int reqheight) {
+
+  const int channels = 3;
+  const int scaled_size = channels * reqwidth * reqheight;
+  std::unique_ptr<unsigned char[]> scaled (new unsigned char[scaled_size]);
+  std::unique_ptr<float[]> adjusted (new float[scaled_size]);
+
+  stbir_resize_uint8(input, width, height, 0, scaled.get(), reqwidth,
+                     reqheight, 0, channels);
+
+  for (int i = 0; i < scaled_size; i += channels) {
+    /* RGB = (RGB - Mean)*StdDev */
+    adjusted[i + 0] = static_cast<float>(scaled[i + 0]);
+    adjusted[i + 1] = static_cast<float>(scaled[i + 1]);
+    adjusted[i + 2] = static_cast<float>(scaled[i + 2]);
+  }
+
+  return adjusted;
+}
+
+std::unique_ptr<float[]> LoadImage(const std::string &path, int reqwidth,
+                                   int reqheight) {
+  int channels = 3;
+  int width, height, cp;
+
+  unsigned char *img = stbi_load(path.c_str(), &width, &height, &cp, channels);
+  if (!img) {
+    std::cerr << "The picture " << path << " could not be loaded";
+    return nullptr;
+  }
+
+  auto ret = PreProcessImage(img, width, height, reqwidth, reqheight);
+  free (img);
+
+  return ret;
+}
+
+bool ParseArgs (int &argc, char *argv[], std::string &image_path,
+                std::string &model_path, int &index, int &size,
+                std::string &in_node, std::string &out_node) {
+
+  int option = 0;
+  while ((option = getopt(argc, argv, "i:m:p:s:I:O:")) != -1) {
+    switch (option) {
+      case 'i' :
+        image_path = optarg;
+        break;
+      case 'm' :
+        model_path = optarg;
+        break;
+      case 'p' :
+        index = std::stoi (optarg);
+        break;
+      case 's' :
+        size = std::stoi (optarg);
+        break;
+      case 'I' :
+        in_node = optarg;
+        break;
+      case 'O' :
+        out_node = optarg;
+        break;
+      default:
+        return false;
+    }
+  }
+  return true;
+}
+
+int main (int argc, char *argv[]) {
+
+  r2i::RuntimeError error;
+  std::string model_path;
+  std::string image_path;
+  std::string in_node;
+  std::string out_node;
+  int Index = 0;
+  int size = 0;
+
+  if (false == ParseArgs (argc, argv, image_path, model_path, Index,
+                          size, in_node, out_node)) {
+    PrintUsage ();
+    exit (EXIT_FAILURE);
+  }
+
+  if (image_path.empty() || model_path.empty ()) {
+    PrintUsage ();
+    exit (EXIT_FAILURE);
+  }
+
+  auto factory = r2i::IFrameworkFactory::MakeFactory(
+                   r2i::FrameworkCode::EDGETPU,
+                   error);
+
+  if (nullptr == factory) {
+    std::cerr << "TensorFlow backend is not built: " << error << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::cout << "Loading Model: " << model_path << std::endl;
+  auto loader = factory->MakeLoader (error);
+  std::shared_ptr<r2i::IModel> model = loader->Load (model_path, error);
+  if (error.IsError ()) {
+    std::cerr << "Loader error: " << error << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::cout << "Setting model to engine" << std::endl;
+  std::shared_ptr<r2i::IEngine> engine = factory->MakeEngine (error);
+  error = engine->SetModel (model);
+
+  std::cout << "Loading image: " << image_path << std::endl;
+  std::unique_ptr<float[]> image_data = LoadImage (image_path, size, size);
+
+  std::cout << "Configuring frame" << std::endl;
+  std::shared_ptr<r2i::IFrame> frame = factory->MakeFrame (error);
+
+  error = frame->Configure (image_data.get(), size, size,
+                            r2i::ImageFormat::Id::RGB);
+
+  std::cout << "Starting engine" << std::endl;
+  error = engine->Start ();
+  if (error.IsError ()) {
+    std::cerr << "Engine start error: " << error << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::cout << "Predicting..." << std::endl;
+  auto prediction = engine->Predict (frame, error);
+  if (error.IsError ()) {
+    std::cerr << "Engine prediction error: " << error << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  PrintTopPrediction (prediction);
+
+  std::cout << "Stopping engine" << std::endl;
+  error = engine->Stop ();
+  if (error.IsError ()) {
+    std::cerr << "Engine stop error: " << error << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  return EXIT_SUCCESS;
+}
diff --git a/examples/r2i/edgetpu/meson.build b/examples/r2i/edgetpu/meson.build
new file mode 100644
index 00000000..b568b5f2
--- /dev/null
+++ b/examples/r2i/edgetpu/meson.build
@@ -0,0 +1,11 @@
+# Compile examples
+app_examples = [
+  'inception',
+]
+
+foreach app : app_examples
+  executable(app, '@0@.cc'.format(app),
+    include_directories: [configinc, common_inc_dir],
+    dependencies : [r2inference_lib_dep],
+    install: false)
+endforeach
diff --git a/examples/r2i/meson.build b/examples/r2i/meson.build
index 6bce9cef..ef662b4b 100644
--- a/examples/r2i/meson.build
+++ b/examples/r2i/meson.build
@@ -13,6 +13,10 @@ foreach app : app_examples
     install: false)
 endforeach
 
+if get_option('enable-edgetpu')
+  subdir('edgetpu')
+endif
+
 if get_option('enable-tensorflow')
   subdir('tensorflow')
 endif
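One note on the example above: SetModel () and Configure () also return a RuntimeError, which main () does not inspect. A defensive variant of those two calls, using only the error API the example already relies on, would be:

  error = engine->SetModel (model);
  if (error.IsError ()) {
    std::cerr << "SetModel error: " << error << std::endl;
    exit (EXIT_FAILURE);
  }

  error = frame->Configure (image_data.get(), size, size,
                            r2i::ImageFormat::Id::RGB);
  if (error.IsError ()) {
    std::cerr << "Frame configuration error: " << error << std::endl;
    exit (EXIT_FAILURE);
  }

From 1f15bd74fb33b6bd10d72bac6b6a03aab070ab2e Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 25 May 2020 14:52:33 +0000
Subject: [PATCH 14/29] Fix code standard issues in EdgeTPU backend

---
 examples/r2i/edgetpu/Makefile.am  | 2 +-
 examples/r2i/edgetpu/inception.cc | 2 +-
 r2i/edgetpu/engine.cc             | 4 ++--
 r2i/edgetpu/engine.h              | 2 +-
 r2i/edgetpu/frameworkfactory.cc   | 3 +--
 r2i/tflite/engine.cc              | 7 +++----
 6 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/examples/r2i/edgetpu/Makefile.am b/examples/r2i/edgetpu/Makefile.am
index f0c0f36d..a8040ced 100644
--- a/examples/r2i/edgetpu/Makefile.am
+++ b/examples/r2i/edgetpu/Makefile.am
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 RidgeRun, LLC (http://www.ridgerun.com)
+# Copyright (C) 2020 RidgeRun, LLC (http://www.ridgerun.com)
 # All Rights Reserved.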
 #
 # The contents of this software are proprietary and confidential to RidgeRun,
diff --git a/examples/r2i/edgetpu/inception.cc b/examples/r2i/edgetpu/inception.cc
index 7ae90602..33bd4265 100644
--- a/examples/r2i/edgetpu/inception.cc
+++ b/examples/r2i/edgetpu/inception.cc
@@ -153,7 +153,7 @@ int main (int argc, char *argv[]) {
                    error);
 
   if (nullptr == factory) {
-    std::cerr << "TensorFlow backend is not built: " << error << std::endl;
+    std::cerr << "EdgeTPU backend is not built: " << error << std::endl;
     exit(EXIT_FAILURE);
   }
 
diff --git a/r2i/edgetpu/engine.cc b/r2i/edgetpu/engine.cc
index b56d47c1..815181f8 100644
--- a/r2i/edgetpu/engine.cc
+++ b/r2i/edgetpu/engine.cc
@@ -37,5 +37,5 @@ Engine::~Engine() {
   this->edgetpu_context.reset();
 }
 
-} //namepsace edgetpu
-} //namepsace r2i
+} //namespace edgetpu
+} //namespace r2i
diff --git a/r2i/edgetpu/engine.h b/r2i/edgetpu/engine.h
index 5316ab79..90ffbcc0 100644
--- a/r2i/edgetpu/engine.h
+++ b/r2i/edgetpu/engine.h
@@ -14,7 +14,7 @@
 
 #include
 
-#include
+#include
 
 namespace r2i {
 namespace edgetpu {
diff --git a/r2i/edgetpu/frameworkfactory.cc b/r2i/edgetpu/frameworkfactory.cc
index b0f8e918..c08c4f68 100644
--- a/r2i/edgetpu/frameworkfactory.cc
+++ b/r2i/edgetpu/frameworkfactory.cc
@@ -12,8 +12,7 @@
 #include "frameworkfactory.h"
 #include "engine.h"
 
-#include
-#include
+#include
 
 namespace r2i {
 namespace edgetpu {
diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index b8a333c7..226a2cdf 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -11,12 +11,12 @@
 
 #include "r2i/tflite/engine.h"
 
+#include
+#include
 #include
 
-#include "r2i/tflite/prediction.h"
 #include "r2i/tflite/frame.h"
-#include
-#include
+#include "r2i/tflite/prediction.h"
 
 namespace r2i {
 namespace tflite {
@@ -224,7 +224,6 @@ std::shared_ptr<r2i::IPrediction> Engine::Predict (std::shared_ptr<r2i::IFrame>
 
 Engine::~Engine () {
   this->Stop();
-  this->interpreter.reset();
 }
 

From cb9cd2180234af3cb2111a0eaa373f5791c7f84e Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 25 May 2020 17:19:58 +0000
Subject: [PATCH 15/29] Add data preprocessing to TPU example

---
 examples/r2i/edgetpu/inception.cc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/r2i/edgetpu/inception.cc b/examples/r2i/edgetpu/inception.cc
index 33bd4265..7bbec384 100644
--- a/examples/r2i/edgetpu/inception.cc
+++ b/examples/r2i/edgetpu/inception.cc
@@ -70,9 +70,9 @@ std::unique_ptr<float[]> PreProcessImage (const unsigned char *input,
 
   for (int i = 0; i < scaled_size; i += channels) {
     /* RGB = (RGB - Mean)*StdDev */
-    adjusted[i + 0] = static_cast<float>(scaled[i + 0]);
-    adjusted[i + 1] = static_cast<float>(scaled[i + 1]);
-    adjusted[i + 2] = static_cast<float>(scaled[i + 2]);
+    adjusted[i + 0] = (static_cast<float>(scaled[i + 0]) - 127.5) / 127.5;
+    adjusted[i + 1] = (static_cast<float>(scaled[i + 1]) - 127.5) / 127.5;
+    adjusted[i + 2] = (static_cast<float>(scaled[i + 2]) - 127.5) / 127.5;
   }
 
   return adjusted;

From 3e74239cf952bcfbffa855b2e49821a5a0af286b Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 25 May 2020 17:21:27 +0000
Subject: [PATCH 16/29] Add templated functions for fixed-point conversion

---
 r2i/tflite/engine.cc | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index 226a2cdf..2e2d5268 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -226,6 +226,17 @@ Engine::~Engine () {
   this->Stop();
 }
 
+template <typename T>
+static uint8_t ConvertToFixedPoint(const T value, const TfLiteTensor &tensor) {
+  return (value / tensor.params.scale) + tensor.params.zero_point;
+}
+
+template <typename T>
+static T ConvertToFloatingPoint(const uint8_t value,
+                                const TfLiteTensor &tensor) {
+  return (value - tensor.params.zero_point) * tensor.params.scale;
+}
+
 void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
                            &/*resolver*/) {
   // No implementation for tflite engine
@@ -257,15 +268,12 @@ void Engine::PreprocessInputData(const float *input_data, const int size,
   }
 
   if (kTfLiteUInt8 == tensor->type) {
-    auto *input_fixed_tensor = interpreter->typed_input_tensor<uint8_t>(0);
+    auto input_fixed_tensor = interpreter->typed_input_tensor<uint8_t>(0);
 
-    std::vector<uint8_t> input_data_fixed;
-    input_data_fixed.resize(size);
+    // Convert to fixed point and write the data to the input tensor
     for (int index = 0; index < size; index++) {
-      input_data_fixed[index] = (uint8_t)input_data[index];
+      input_fixed_tensor[index] = ConvertToFixedPoint(input_data[index], *tensor);
     }
-
-    memcpy(input_fixed_tensor, input_data_fixed.data(), input_data_fixed.size());
 
   } else if (kTfLiteFloat32 == tensor->type) {
     auto input_tensor = interpreter->typed_tensor<float>(input_indices[0]);
@@ -290,8 +298,8 @@ float *Engine::GetOutputTensorData(::tflite::Interpreter *interpreter,
       const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(index);
 
       for (int value_index = 0; value_index < num_values; ++value_index) {
-        output_data[out_idx++] = (output[value_index] - out_tensor->params.zero_point) *
-                                 out_tensor->params.scale;
+        output_data[out_idx++] = ConvertToFloatingPoint<float>(output[value_index],
+                                 *out_tensor);
       }
     } else if (kTfLiteFloat32 == out_tensor->type) {

From d034dfbadb9f9536a30ff6218be0d140ef36e3bd Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 25 May 2020 23:40:39 +0000
Subject: [PATCH 17/29] Remove unneeded flag in TPU example Makefile

---
 examples/r2i/edgetpu/Makefile.am | 2 --
 1 file changed, 2 deletions(-)

diff --git a/examples/r2i/edgetpu/Makefile.am b/examples/r2i/edgetpu/Makefile.am
index a8040ced..e0435030 100644
--- a/examples/r2i/edgetpu/Makefile.am
+++ b/examples/r2i/edgetpu/Makefile.am
@@ -31,8 +31,6 @@ AM_CPPFLAGS = \
 LDADD = \
 	$(RR_LIBS) \
 	$(CODE_COVERAGE_LIBS) \
-	$(TFLITE_LIBS) \
-	$(EDGETPU_LIBS) \
 	$(top_builddir)/r2i/libr2inference-@RR_PACKAGE_VERSION@.la
 
 endif # ENABLE_EXAMPLES

From 9f6b096f2b1d80236066488b48310970136af18a Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 25 May 2020 23:41:15 +0000
Subject: [PATCH 18/29] Remove commented code from TPU example

---
 examples/r2i/edgetpu/inception.cc | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/examples/r2i/edgetpu/inception.cc b/examples/r2i/edgetpu/inception.cc
index 7bbec384..a971b411 100644
--- a/examples/r2i/edgetpu/inception.cc
+++ b/examples/r2i/edgetpu/inception.cc
@@ -39,9 +39,6 @@ void PrintTopPrediction (std::shared_ptr<r2i::IPrediction> prediction) {
   std::cout << "Highest probability is label "
             << std::distance(results.begin(), it) << " (" << *it << ")"
             << std::endl;
-
-  /*std::cout << "Highest probability is label "
-            << index << " (" << max << ")" << std::endl;*/
 }

From f0154535ec056aaa86da706d96f22e4d37f8f0ea Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Mon, 25 May 2020 23:44:35 +0000
Subject: [PATCH 19/29] Refactor fixed-point conversion functions

---
 r2i/tflite/engine.cc | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index 2e2d5268..b6f592a9 100644
--- a/r2i/tflite/engine.cc
+++
 b/r2i/tflite/engine.cc
@@ -226,15 +226,16 @@ Engine::~Engine () {
   this->Stop();
 }
 
-template <typename T>
-static uint8_t ConvertToFixedPoint(const T value, const TfLiteTensor &tensor) {
-  return (value / tensor.params.scale) + tensor.params.zero_point;
+template <typename T1, typename T2>
+static T2 ConvertToFixedPoint(const T1 value, const float scale,
+                              const int zero_point) {
+  return static_cast<T2>((value / scale) + zero_point);
 }
 
-template <typename T>
-static T ConvertToFloatingPoint(const uint8_t value,
-                                const TfLiteTensor &tensor) {
-  return (value - tensor.params.zero_point) * tensor.params.scale;
+template <typename T1, typename T2>
+static T1 ConvertToFloatingPoint(const T2 value, const float scale,
+                                 const int zero_point) {
+  return static_cast<T1>((value - zero_point) * scale);
 }
 
 void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
@@ -242,7 +243,7 @@ void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
   // No implementation for tflite engine
 }
 
-void Engine::SetInterpreterContext(::tflite::Interpreter */*interpreter*/) {
+void Engine::SetInterpreterContext(::tflite::Interpreter * /*interpreter*/) {
   // No implementation for tflite engine
 }
 
@@ -261,7 +262,8 @@ void Engine::PreprocessInputData(const float *input_data, const int size,
     // Convert to fixed point and write the data to the input tensor
     for (int index = 0; index < size; index++) {
-      input_fixed_tensor[index] = ConvertToFixedPoint(input_data[index], *tensor);
+      input_fixed_tensor[index] = ConvertToFixedPoint<float, uint8_t>
+                                  (input_data[index], tensor->params.scale, tensor->params.zero_point);
     }
   } else if (kTfLiteFloat32 == tensor->type) {
     auto input_tensor = interpreter->typed_tensor<float>(input_indices[0]);
@@ -298,8 +300,9 @@ float *Engine::GetOutputTensorData(::tflite::Interpreter *interpreter,
       const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(index);
 
       for (int value_index = 0; value_index < num_values; ++value_index) {
-        output_data[out_idx++] = ConvertToFloatingPoint<float>(output[value_index],
-                                 *out_tensor);
+        output_data[out_idx++] = ConvertToFloatingPoint<float, uint8_t>
+                                 (output[value_index],
+                                  out_tensor->params.scale, out_tensor->params.zero_point);
       }
     } else if (kTfLiteFloat32 == out_tensor->type) {
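For reference, here is a tiny self-contained round trip through the two conversions the refactor above introduces. The quantization parameters (scale 1/128, zero point 128) are example values we chose for the sketch, not values from the patch:

#include <cstdint>
#include <iostream>

int main () {
  const float scale = 0.0078125f;  /* example value: 1/128 */
  const int zero_point = 128;      /* example midpoint for uint8 */

  const float real = 0.5f;
  /* q = real / scale + zero_point */
  const uint8_t quantized = static_cast<uint8_t> ((real / scale) + zero_point);  /* 192 */
  /* real = (q - zero_point) * scale */
  const float restored = (quantized - zero_point) * scale;                       /* 0.5 */

  std::cout << "quantized=" << static_cast<int> (quantized)
            << " restored=" << restored << std::endl;
  return 0;
}

From 31afaa555b80223c93897690963b0a4451ab0b60 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Tue, 26 May 2020 01:22:46 +0000
Subject: [PATCH 20/29] Refactor input/output tensor handlers of tflite

---
 r2i/tflite/engine.cc | 103 ++++++++++++++++++++-----------------------
 r2i/tflite/engine.h  |   6 ++-
 2 files changed, 53 insertions(+), 56 deletions(-)

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index b6f592a9..46cc4225 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -13,7 +13,6 @@
 
 #include
 #include
-#include
 
 #include "r2i/tflite/frame.h"
 #include "r2i/tflite/prediction.h"
@@ -210,14 +209,13 @@ std::shared_ptr<r2i::IPrediction> Engine::Predict (std::shared_ptr<r2i::IFrame>
     return nullptr;
   }
 
-  int output_size;
-  auto tensor_data = this->GetOutputTensorData(this->interpreter.get(),
-                     output_size, error);
+  std::vector<float> tensor_data;
+  this->GetOutputTensorData(this->interpreter.get(), tensor_data, error);
   if (r2i::RuntimeError::EOK != error.GetCode()) {
     return nullptr;
   }
 
-  prediction->SetTensorValues(tensor_data, output_size);
+  prediction->SetTensorValues(tensor_data.data(), tensor_data.size());
 
   return prediction;
 }
@@ -232,12 +230,31 @@ static T2 ConvertToFixedPoint(const T1 value, const float scale,
   return static_cast<T2>((value / scale) + zero_point);
 }
 
+template <typename T1, typename T2>
+static void ConvertArrayToFixedPoint(const T1 *data, T2 *output_data,
+    const int size, const float scale, const int zero_point) {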
+  for (int index = 0; index < size; index++) {
+    output_data[index] = static_cast<T2>(ConvertToFixedPoint<T1, T2>(data[index],
+                         scale, zero_point));
+  }
+}
+
 template <typename T1, typename T2>
 static T1 ConvertToFloatingPoint(const T2 value, const float scale,
                                  const int zero_point) {
   return static_cast<T1>((value - zero_point) * scale);
 }
 
+template <typename T1, typename T2>
+static void ConvertArrayToFloatingPoint(const T2 *data,
+    std::vector<T1> &output_data, const int size, const float scale,
+    const int zero_point) {
+  for (int index = 0; index < size; index++) {
+    output_data[index] = ConvertToFloatingPoint<T1, T2>(data[index], scale,
+                         zero_point);
+  }
+}
+
 void Engine::SetupResolver(::tflite::ops::builtin::BuiltinOpResolver
                            &/*resolver*/) {
   // No implementation for tflite engine
@@ -276,15 +293,10 @@ void Engine::PreprocessInputData(const float *input_data, const int size,
 
   if (kTfLiteUInt8 == tensor->type) {
     auto input_fixed_tensor = interpreter->typed_input_tensor<uint8_t>(0);
-
-    // Convert to fixed point and write the data to the input tensor
-    for (int index = 0; index < size; index++) {
-      input_fixed_tensor[index] = ConvertToFixedPoint<float, uint8_t>
-                                  (input_data[index], tensor->params.scale, tensor->params.zero_point);
-    }
+    ConvertArrayToFixedPoint(input_data, input_fixed_tensor, size,
+                             tensor->params.scale, tensor->params.zero_point);
   } else if (kTfLiteFloat32 == tensor->type) {
     auto input_tensor = interpreter->typed_tensor<float>(input_indices[0]);
-
     memcpy(input_tensor, input_data, size * sizeof(float));
   } else {
     error.Set (RuntimeError::Code::WRONG_API_USAGE,
@@ -
 }
 
-float *Engine::GetOutputTensorData(::tflite::Interpreter *interpreter,
-                                   int &output_size,
-                                   r2i::RuntimeError &error) {
-  std::vector<float> output_data;
-  const auto &output_indices = interpreter->outputs();
-  const int num_outputs = output_indices.size();
-
-  int out_idx = 0;
-  for (int index = 0; index < num_outputs; ++index) {
-    const auto *out_tensor = interpreter->tensor(output_indices[index]);
+void Engine::GetOutputTensorData(::tflite::Interpreter *interpreter,
+                                 std::vector<float> &output_data,
+                                 r2i::RuntimeError &error) {
 
-    if (nullptr == out_tensor) {
-      error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
-                 "Output tensor is null");
-      return nullptr;
-    }
+  const auto &output_indices = interpreter->outputs();
+  const auto *out_tensor = interpreter->tensor(output_indices[0]);
 
-    if (kTfLiteUInt8 == out_tensor->type) {
-
-      const int num_values = out_tensor->bytes;
-      output_data.resize(out_idx + num_values);
-      const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(index);
-
-      for (int value_index = 0; value_index < num_values; ++value_index) {
-        output_data[out_idx++] = ConvertToFloatingPoint<float, uint8_t>
-                                 (output[value_index],
-                                  out_tensor->params.scale, out_tensor->params.zero_point);
-      }
-    } else if (kTfLiteFloat32 == out_tensor->type) {
-
-      const int num_values = out_tensor->bytes / sizeof(float);
-      output_data.resize(out_idx + num_values);
-      const float *output = interpreter->typed_output_tensor<float>(index);
-
-      for (int value_index = 0; value_index < num_values; ++value_index) {
-        output_data[out_idx++] = output[value_index];
-      }
-    } else {
-      error.Set (RuntimeError::Code::WRONG_API_USAGE,
-                 "Output tensor has unsupported output type");
-      return nullptr;
-    }
+  if (nullptr == out_tensor) {
+    error.Set (RuntimeError::Code::FRAMEWORK_ERROR,
+               "Output tensor is null");
+    return;
   }
 
+  if (kTfLiteUInt8 == out_tensor->type) {
+    const int num_values = out_tensor->bytes;
+    output_data.resize(num_values);
+    const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(0);
+    ConvertArrayToFloatingPoint(output, output_data, num_values,
+                                out_tensor->params.scale, out_tensor->params.zero_point);
+  } else if (kTfLiteFloat32 == out_tensor->type) {
+    const int num_values = out_tensor->bytes / sizeof(float);
+    output_data.resize(num_values);
+    const float *output = interpreter->typed_output_tensor<float>(0);
+    memcpy(&output_data[0], output, num_values * sizeof(float));
+  } else {
+    error.Set (RuntimeError::Code::WRONG_API_USAGE,
+               "Output tensor has unsupported output type");
+    return;
+  }
 }
 
 } //namespace tflite
diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index c1a49ebf..77f9f61f 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -15,6 +15,7 @@
 
 #include
 #include
+#include
 #include
 #include
 
 namespace r2i {
@@ -61,8 +62,9 @@ class Engine : public IEngine {
  private:
   void PreprocessInputData(const float *input_data, const int size,
                            ::tflite::Interpreter *interpreter, r2i::RuntimeError &error);
-  float *GetOutputTensorData(::tflite::Interpreter *interpreter, int &output_size,
-                             r2i::RuntimeError &error);
+  void GetOutputTensorData(::tflite::Interpreter *interpreter,
+                           std::vector<float> &output_data,
+                           r2i::RuntimeError &error);
 
 };
 

From 0610d53135f7c230dc612e45b9a744922801d000 Mon Sep 17 00:00:00 2001
From: Luis Alonso Murillo Rojas
Date: Tue, 26 May 2020 01:34:00 +0000
Subject: [PATCH 21/29] Move context setting to the tflite start method

---
 r2i/tflite/engine.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index 46cc4225..6933a4b9 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -90,6 +90,8 @@ RuntimeError Engine::Start () {
       return error;
     }
 
+    this->SetInterpreterContext(interpreter.get());
+
     std::shared_ptr<::tflite::Interpreter> tflite_interpreter_shared{std::move(interpreter)};
 
     this->interpreter = tflite_interpreter_shared;
@@ -168,8 +170,6 @@ std::shared_ptr<r2i::IPrediction> Engine::Predict (std::shared_ptr<r2i::IFrame>
     return nullptr;
   }
 
-  this->SetInterpreterContext(this->interpreter.get());
-
   if (this->number_of_threads > 0) {
     interpreter->SetNumThreads(this->number_of_threads);
   }
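Patch 21 moves the SetInterpreterContext() call into Start(), where the EdgeTPU subclass can bind the freshly built interpreter to the TPU before it is shared. As a rough sketch of what such overrides can look like with the public libedgetpu API — the member name `edgetpu_context` follows r2i/edgetpu/engine.cc, but this code is illustrative, not part of this series:

// Sketch only; assumes #include <edgetpu.h> and the edgetpu::Engine subclass.
void Engine::SetupResolver (::tflite::ops::builtin::BuiltinOpResolver &resolver) {
  // Register the custom EdgeTPU operator so compiled models resolve.
  resolver.AddCustom (edgetpu::kCustomOp, edgetpu::RegisterCustomOp ());
}

void Engine::SetInterpreterContext (::tflite::Interpreter *interpreter) {
  // Open the first available EdgeTPU device and attach it to the interpreter.
  this->edgetpu_context = edgetpu::EdgeTpuManager::GetSingleton ()->OpenDevice ();
  interpreter->SetExternalContext (kTfLiteEdgeTpuContext, this->edgetpu_context.get ());
}

From 99860ae757584dea2cc24e60486e516af1e7863b Mon Sep 17 00:00:00 2001
From: jafet-chaves
Date: Tue, 19 May 2020 08:23:32 -0600
Subject: [PATCH 22/29] Add matrix to run autotools job in multiple containers

---
 .github/workflows/tests.yaml | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index a3383425..83d24dc9 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -5,14 +5,20 @@ on:
     branches:
       - master
       - dev-*
+      - feature/extend-ci-ubuntu-16.04
   pull_request:
     branches:
       - dev-*
 jobs:
   build_tensorflow_autotools:
-    runs-on: ubuntu-18.04
-    container: ridgerun/r2inference:v0.1.4
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"]
+    container:
+      image: ${{ matrix.container }}
     env:
       CXXFLAGS: "-Werror"
     steps:

From 9a54f6c2044a57edc0ec518e21625e99d6e0c672 Mon Sep 17 00:00:00 2001
From: jafet-chaves
Date: Tue, 19 May 2020 08:33:43 -0600
Subject: [PATCH 23/29] Add matrix in all jobs

---
 .github/workflows/tests.yaml | 45 ++++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 83d24dc9..a1dfbcef 100644
---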
 a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -47,8 +47,13 @@ jobs:
       make
       ./list-backends
   build_tflite_autotools:
-    runs-on: ubuntu-18.04
-    container: ridgerun/r2inference:v0.1.4
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"]
+    container:
+      image: ${{ matrix.container }}
     env:
       TENSORFLOW_PATH: /root/r2inference/backends/tflite/v2.0.1/include/tensorflow
       CPPFLAGS: "-I${TENSORFLOW_PATH} -I${TENSORFLOW_PATH}/tensorflow/lite/tools/make/downloads/flatbuffers/include"
@@ -78,8 +83,13 @@ jobs:
       make
       ./list-backends
   build_tensorflow_tflite_autotools:
-    runs-on: ubuntu-18.04
-    container: ridgerun/r2inference:v0.1.4
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"]
+    container:
+      image: ${{ matrix.container }}
     env:
       TENSORFLOW_PATH: /root/r2inference/backends/tflite/v2.0.1/include/tensorflow
       CPPFLAGS: "-I${TENSORFLOW_PATH} -I${TENSORFLOW_PATH}/tensorflow/lite/tools/make/downloads/flatbuffers/include"
@@ -111,8 +121,13 @@ jobs:
       make
       ./list-backends
   build_tensorflow_meson:
-    runs-on: ubuntu-18.04
-    container: ridgerun/r2inference:v0.1.4
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"]
+    container:
+      image: ${{ matrix.container }}
     env:
       CXXFLAGS: "-Werror"
     steps:
@@ -137,8 +152,13 @@
       cd build/examples/external
       ./list_backends
   build_tflite_meson:
-    runs-on: ubuntu-18.04
-    container: ridgerun/r2inference:v0.1.4
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"]
+    container:
+      image: ${{ matrix.container }}
     env:
       TENSORFLOW_PATH: /root/r2inference/backends/tflite/v2.0.1/include/tensorflow
       CPPFLAGS: "-I${TENSORFLOW_PATH} -I${TENSORFLOW_PATH}/tensorflow/lite/tools/make/downloads/flatbuffers/include"
@@ -165,8 +185,13 @@
       cd build/examples/external
       ./list_backends
   build_tensorflow_tflite_meson:
-    runs-on: ubuntu-18.04
-    container: ridgerun/r2inference:v0.1.4
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"]
+    container:
+      image: ${{ matrix.container }}
     env:
       TENSORFLOW_PATH: /root/r2inference/backends/tflite/v2.0.1/include/tensorflow
       CPPFLAGS: "-I${TENSORFLOW_PATH} -I${TENSORFLOW_PATH}/tensorflow/lite/tools/make/downloads/flatbuffers/include"

From 677d3d49e3441ec51d7f4e9f1ed940e6f9fb059f Mon Sep 17 00:00:00 2001
From: jafet-chaves
Date: Wed, 20 May 2020 12:26:23 -0600
Subject: [PATCH 24/29] Run only the jobs that don't enable tflite in the Ubuntu 16.04 image

This is because the Ubuntu 16.04 image currently doesn't allow building
r2inference successfully when the tflite backend is enabled.
--- .github/workflows/tests.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index a1dfbcef..e7c7152a 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -16,7 +16,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] container: image: ${{ matrix.container }} env: @@ -51,7 +51,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} env: @@ -87,7 +87,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} env: @@ -125,7 +125,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] container: image: ${{ matrix.container }} env: @@ -156,7 +156,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} env: @@ -189,7 +189,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.4", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} env: From be8dbfda790a22ce63bde68e77e9edc732a1e70c Mon Sep 17 00:00:00 2001 From: jafet-chaves Date: Wed, 20 May 2020 12:33:33 -0600 Subject: [PATCH 25/29] Remove testing branch --- .github/workflows/tests.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index e7c7152a..e89983aa 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -5,7 +5,6 @@ on: branches: - master - dev-* - - feature/extend-ci-ubuntu-16.04 pull_request: branches: - dev-* From 64667157ec185c9c7dca08aa5314b855930b575b Mon Sep 17 00:00:00 2001 From: jafet-chaves Date: Wed, 20 May 2020 16:08:33 -0600 Subject: [PATCH 26/29] Use newest version of Ubuntu 16.04 image --- .github/workflows/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index e89983aa..ccb09b71 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] container: image: ${{ matrix.container }} env: @@ -124,7 +124,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04] - container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.1.0"] + container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] container: image: ${{ matrix.container }} env: From a9a3c69ccf45f16edcda46366438ba29d420e75c Mon Sep 17 00:00:00 2001 From: jafet-chaves Date: Mon, 25 May 2020 15:28:03 -0600 Subject: [PATCH 27/29] Remove os definition 
from the matrix --- .github/workflows/tests.yaml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index ccb09b71..98914f0a 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -11,10 +11,9 @@ on: jobs: build_tensorflow_autotools: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-18.04 strategy: matrix: - os: [ubuntu-18.04] container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] container: image: ${{ matrix.container }} @@ -46,10 +45,9 @@ jobs: make ./list-backends build_tflite_autotools: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-18.04 strategy: matrix: - os: [ubuntu-18.04] container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} @@ -82,10 +80,9 @@ jobs: make ./list-backends build_tensorflow_tflite_autotools: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-18.04 strategy: matrix: - os: [ubuntu-18.04] container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} @@ -120,10 +117,9 @@ jobs: make ./list-backends build_tensorflow_meson: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-18.04 strategy: matrix: - os: [ubuntu-18.04] container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] container: image: ${{ matrix.container }} @@ -151,10 +147,9 @@ jobs: cd build/examples/external ./list_backends build_tflite_meson: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-18.04 strategy: matrix: - os: [ubuntu-18.04] container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} @@ -184,10 +179,9 @@ jobs: cd build/examples/external ./list_backends build_tensorflow_tflite_meson: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-18.04 strategy: matrix: - os: [ubuntu-18.04] container: ["ridgerun/r2inference:v0.1.5"] container: image: ${{ matrix.container }} From b0515a35cd23e23c8dd409c6da7f02ceffbdd099 Mon Sep 17 00:00:00 2001 From: jafet-chaves Date: Mon, 25 May 2020 15:48:03 -0600 Subject: [PATCH 28/29] Use consistent naming for the docker images --- .github/workflows/tests.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 98914f0a..cacd9909 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] + container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] container: image: ${{ matrix.container }} env: @@ -48,7 +48,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference:v0.1.5"] + container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5"] container: image: ${{ matrix.container }} env: @@ -83,7 +83,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference:v0.1.5"] + container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5"] container: image: ${{ matrix.container }} env: @@ -120,7 +120,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] + container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] container: image: ${{ matrix.container }} env: @@ -150,7 +150,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference:v0.1.5"] + container: 
["ridgerun/r2inference-ubuntu-18.04:v0.1.5"] container: image: ${{ matrix.container }} env: @@ -182,7 +182,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference:v0.1.5"] + container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5"] container: image: ${{ matrix.container }} env: From 88339f3deed3bc6156d82b7b3fa605b49fa6adc3 Mon Sep 17 00:00:00 2001 From: jafet-chaves Date: Tue, 26 May 2020 08:08:08 -0600 Subject: [PATCH 29/29] Use image version with default OpenCV installation (deb packages) --- .github/workflows/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index cacd9909..2981fc63 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] + container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.1"] container: image: ${{ matrix.container }} env: @@ -120,7 +120,7 @@ jobs: runs-on: ubuntu-18.04 strategy: matrix: - container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.0"] + container: ["ridgerun/r2inference-ubuntu-18.04:v0.1.5", "ridgerun/r2inference-ubuntu-16.04:v0.3.1"] container: image: ${{ matrix.container }} env: