Update tensorflow-serving docker (#2509)
zufangzhu authored and Dboyqiao committed Nov 22, 2023
1 parent 552e9cd commit a3f8c28
Showing 8 changed files with 101 additions and 31 deletions.
5 changes: 4 additions & 1 deletion docker/tensorflow-serving/README.md
@@ -10,14 +10,16 @@ To build the docker container, enter the [docker/tensorflow-serving](./) folder

### I. Binaries Preparation

Refer to [Install for Tensorflow Serving](../../docs/guide/tensorflow_serving.md) to build the TensorFlow Serving binary, and refer to [Install for CPP](../../docs/install/install_for_cpp.md) to build the Intel® Extension for TensorFlow* CC library from source. Then package and copy these binaries into the `./models/binaries` directory, as shown below.
Refer to [Install for Tensorflow Serving](../../docs/guide/tf_serving_install.md) to build the TensorFlow Serving binary, and refer to [Install for CPP](../../docs/install/install_for_cpp.md) to build the Intel® Extension for TensorFlow* CC library from source. Then package and copy these binaries into the `./models/binaries` directory, as shown below.

```bash
mkdir -p ./models/binaries

# Package and copy Intel® Extension for TensorFlow* CC library
mkdir -p itex-bazel-bin/
cp -r <path_to_itex>/bazel-out/k8-opt-ST-*/bin/ itex-bazel-bin/
# if you build with the threadpool
cp -r <path_to_itex>/bazel-out/k8-opt-ST-*/bin/ itex-bazel-bin/bin_threadpool/
tar cvfh itex-bazel-bin.tar itex-bazel-bin/
cp itex-bazel-bin.tar ./models/binaries/

@@ -61,3 +63,4 @@ docker run -v <your-local-dir>:/workspace \
-it \
$IMAGE_NAME
```
NOTE: If you want to run the Docker container with the threadpool build, add `-e ITEX_OMP_THREADPOOL=0` to the `docker run` command.
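A sketch of such a run (mount path and image name carried over from the `docker run` example above):

```bash
docker run -v <your-local-dir>:/workspace \
           -e ITEX_OMP_THREADPOOL=0 \
           -it \
           $IMAGE_NAME
```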
12 changes: 6 additions & 6 deletions docker/tensorflow-serving/build.sh
@@ -22,12 +22,12 @@ IMAGE_NAME=intel-extension-for-tensorflow:serving-$IMAGE_TYPE
if [ $IMAGE_TYPE == "gpu" ]
then
docker build --no-cache --build-arg UBUNTU_VERSION=22.04 \
--build-arg ICD_VER=23.17.26241.33-647~22.04 \
--build-arg LEVEL_ZERO_GPU_VER=1.3.26241.33-647~22.04 \
--build-arg LEVEL_ZERO_VER=1.11.0-647~22.04 \
--build-arg LEVEL_ZERO_DEV_VER=1.11.0-647~22.04 \
--build-arg DPCPP_VER=2023.2.0-49495 \
--build-arg MKL_VER=2023.2.0-49495 \
--build-arg ICD_VER=23.30.26918.50-736~22.04 \
--build-arg LEVEL_ZERO_GPU_VER=1.3.26918.50-736~22.04 \
--build-arg LEVEL_ZERO_VER=1.13.1-719~22.04 \
--build-arg LEVEL_ZERO_DEV_VER=1.13.1-719~22.04 \
--build-arg DPCPP_VER=2024.0.0-49819 \
--build-arg MKL_VER=2024.0.0-49656 \
--build-arg TF_SERVING_BINARY=tensorflow_model_server \
--build-arg TF_PLUGIN_TAR=itex-bazel-bin.tar \
-t $IMAGE_NAME \
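For reference, a hypothetical invocation of the script (assuming it takes the image type, `cpu` or `gpu`, as its first argument, matching the `$IMAGE_TYPE` check above):

```bash
./build.sh gpu
```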
8 changes: 7 additions & 1 deletion docker/tensorflow-serving/itex-serving-cpu.Dockerfile
@@ -57,10 +57,16 @@ RUN mkdir -p ${MODEL_BASE_PATH}
ENV MODEL_NAME=my_model
RUN mkdir -p ${MODEL_BASE_PATH}/${MODEL_NAME}

ENV ITEX_OMP_THREADPOOL=1
RUN echo '#!/bin/bash \n\n\
if [ ${ITEX_OMP_THREADPOOL} == 1 ]; then \n\
DIR=/itex/itex-bazel-bin/bin/itex \n\
else \n\
DIR=/itex/itex-bazel-bin/bin_threadpool/itex \n\
fi \n\
/usr/local/bin/tensorflow_model_server --port=8500 --rest_api_port=8501 \
--model_name=${MODEL_NAME} --model_base_path=${MODEL_BASE_PATH}/${MODEL_NAME} \
--tensorflow_plugins=/itex/itex-bazel-bin/bin/itex \
--tensorflow_plugins=${DIR} \
"$@"' > /usr/bin/tf_serving_entrypoint.sh \
&& chmod +x /usr/bin/tf_serving_entrypoint.sh
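For readability, the `RUN echo` above writes out roughly this entrypoint script (a sketch of the expanded content; the default `ITEX_OMP_THREADPOOL=1` selects the OpenMP plugin build):

```bash
#!/bin/bash

# ITEX_OMP_THREADPOOL=1 (default) selects the OpenMP build of the plugin;
# ITEX_OMP_THREADPOOL=0 selects the threadpool build.
if [ ${ITEX_OMP_THREADPOOL} == 1 ]; then
    DIR=/itex/itex-bazel-bin/bin/itex
else
    DIR=/itex/itex-bazel-bin/bin_threadpool/itex
fi
/usr/local/bin/tensorflow_model_server --port=8500 --rest_api_port=8501 \
    --model_name=${MODEL_NAME} --model_base_path=${MODEL_BASE_PATH}/${MODEL_NAME} \
    --tensorflow_plugins=${DIR} \
    "$@"
```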

4 changes: 2 additions & 2 deletions docker/tensorflow-serving/itex-serving-gpu.Dockerfile
@@ -37,9 +37,9 @@ RUN apt-get update && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

RUN wget -qO - https://repositories.intel.com/graphics/intel-graphics.key | \
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy max" | \
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy unified" | \
tee /etc/apt/sources.list.d/intel-gpu-jammy.list
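As an optional check (a sketch; the package name is an assumption based on the `LEVEL_ZERO_GPU_VER` build argument), you can verify inside the image that the new repository resolves:

```bash
apt-get update && apt-cache policy intel-level-zero-gpu
```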

ARG ICD_VER
13 changes: 3 additions & 10 deletions docs/guide/tf_serving_install.md
@@ -42,7 +42,7 @@ The generated `libitex_cpu_cc.so` or `libitex_gpu_cc.so` binary is found in the
git clone https://github.com/tensorflow/tensorflow
# checkout specific commit id
cd tensorflow
cd tensorflow
git checkout xxxxx
```
- Add `alwayslink=1` to the `kernels_experimental` library in the local `tensorflow/tensorflow/c/BUILD` file, as sketched below:
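For illustration only, the target then looks roughly like this (a sketch; rule name and attribute list abbreviated, not the verbatim TensorFlow BUILD contents):

```python
# tensorflow/c/BUILD (sketch; only alwayslink = 1 is the required change)
tf_cuda_library(
    name = "kernels_experimental",
    srcs = ["kernels_experimental.cc"],
    hdrs = ["kernels_experimental.h"],
    alwayslink = 1,
)
```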
@@ -72,15 +72,8 @@ The generated `libitex_cpu_cc.so` or `libitex_gpu_cc.so` binary is found in the
- Patch TensorFlow Serving
```
cd serving
patch -p1 -i ../intel-extension-for-tensorflow/third_party/tf_serving/serving_plugin.patch
```
- Update `serving/WORKSPACE` to use local TensorFlow
Replace L24-L29 with below code to use local TensorFlow: https://github.com/tensorflow/serving/blob/master/WORKSPACE#L24
```
local_repository(
name = "org_tensorflow",
path = "path to local tensorflow source code",
)
git checkout r2.14
git apply ../intel-extension-for-tensorflow/third_party/tf_serving/serving_plugin.patch
```

- Build TensorFlow Serving
6 changes: 6 additions & 0 deletions docs/install/install_for_cpp.md
@@ -181,6 +181,12 @@ For CPU support
$ bazel build -c opt --config=cpu //itex:libitex_cpu_cc.so
```

If you want to build with the threadpool, add the build option `--define=build_with_threadpool=true` and set the environment variable `ITEX_OMP_THREADPOOL=0`:

```bash
$ bazel build -c opt --config=cpu --define=build_with_threadpool=true //itex:libitex_cpu_cc.so
```
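At run time, set the matching variable before starting the process that loads the library (a minimal sketch, assuming it is read at run time as in the Docker images):

```bash
$ export ITEX_OMP_THREADPOOL=0
```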

CC library location: `<Path to intel-extension-for-tensorflow>/bazel-bin/itex/libitex_cpu_cc.so`

NOTE: `libitex_cpu_cc.so` depends on `libiomp5.so`, so `libiomp5.so` should be copied into the same directory as `libitex_cpu_cc.so`.
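A sketch of that copy step (the oneAPI runtime path below is an assumption; adjust it to wherever `libiomp5.so` lives on your system):

```bash
# Copy the OpenMP runtime next to the freshly built CC library.
$ cp /opt/intel/oneapi/compiler/latest/linux/compiler/lib/intel64_lin/libiomp5.so \
     ./bazel-bin/itex/
```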
10 changes: 5 additions & 5 deletions docs/install/install_for_xpu.md
@@ -52,11 +52,11 @@ To use Intel® Optimization for Horovod* with the Intel® oneAPI Collective Comm
```
$ docker pull intel/intel-extension-for-tensorflow:xpu
$ docker run -it -p 8888:8888 --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path --ipc=host intel/intel-extension-for-tensorflow:xpu
$ export LD_LIBRARY_PATH=/opt/intel/oneapi/lib:/opt/intel/oneapi/lib/intel64:/opt/intel/oneapi/lib/intel64/libfabric:$LD_LIBRARY_PATH
$ export PATH=/opt/intel/oneapi/lib/intel64/bin:$PATH
$ export I_MPI_ROOT=/opt/intel/oneapi/lib/intel64/
$ export CCL_ROOT=/opt/intel/oneapi/lib/intel64/
$ export FI_PROVIDER_PATH=/opt/intel/oneapi/lib/intel64/libfabric/
$ export LD_LIBRARY_PATH=/opt/intel/oneapi/redist/opt/mpi/libfabric/lib:$LD_LIBRARY_PATH
$ export PATH=/opt/intel/oneapi/redist/bin:$PATH
$ export I_MPI_ROOT=/opt/intel/oneapi/redist/lib
$ export CCL_ROOT=/opt/intel/oneapi/redist
$ export FI_PROVIDER_PATH=/opt/intel/oneapi/redist/opt/mpi/libfabric/lib/prov
```
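Optionally, a quick sanity check that the MPI and libfabric paths resolve (a hypothetical command; it assumes Intel® Optimization for Horovod* is installed in the image):

```
$ mpirun -n 2 python -c "import horovod.tensorflow as hvd; hvd.init(); print(hvd.rank())"
```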

Then open http://localhost:8888/ in your browser.
74 changes: 68 additions & 6 deletions third_party/tf_serving/serving_plugin.patch
Expand Up @@ -28,17 +28,19 @@ index 7a017679..270d594e 100644
+ visibility = ["//visibility:public"],
+)
diff --git a/tensorflow_serving/model_servers/BUILD b/tensorflow_serving/model_servers/BUILD
index 2809e2af..3045c42f 100644
index 616d887a..0387aaee 100644
--- a/tensorflow_serving/model_servers/BUILD
+++ b/tensorflow_serving/model_servers/BUILD
@@ -1,5 +1,6 @@
# Description: Model Server
@@ -2,6 +2,8 @@

# Placeholder: load py_test
# Placeholder: load py_binary
+
+load("//tensorflow_serving:serving.bzl", "if_with_plugins_support")
load("//tensorflow_serving:tensorflow_version.bzl", "if_not_v2", "if_v2")
load("@rules_pkg//:pkg.bzl", "pkg_deb", "pkg_tar")
load("@org_tensorflow//tensorflow:tensorflow.bzl", "if_google", "if_libtpu", "if_with_tpu_support")
@@ -417,7 +418,11 @@ cc_library(
@@ -421,7 +423,11 @@ cc_library(
"@org_tensorflow//tensorflow/core:protos_all_cc",
"@org_tensorflow//tensorflow/core:tensorflow",
"@org_tensorflow//tensorflow/core/profiler/rpc:profiler_service_impl",
@@ -51,15 +53,15 @@ index 2809e2af..3045c42f 100644
)

cc_library(
@@ -435,7 +440,6 @@ cc_library(
@@ -439,7 +445,6 @@ cc_library(
],
deps = [
":server_lib",
- "@org_tensorflow//tensorflow/c:c_api",
"@org_tensorflow//tensorflow/compiler/jit:xla_cpu_jit",
"@org_tensorflow//tensorflow/core:lib",
"@org_tensorflow//tensorflow/core/platform/cloud:gcs_file_system",
@@ -452,6 +456,14 @@ cc_library(
@@ -456,6 +461,14 @@ cc_library(

cc_binary(
name = "tensorflow_model_server",
@@ -151,6 +153,66 @@ index 03467d6a..26dfdb99 100644

Options();
};
diff --git a/tensorflow_serving/model_servers/test_util/BUILD b/tensorflow_serving/model_servers/test_util/BUILD
index dcc97948..95d2ac7f 100644
--- a/tensorflow_serving/model_servers/test_util/BUILD
+++ b/tensorflow_serving/model_servers/test_util/BUILD
@@ -31,6 +31,7 @@ cc_library(
"//visibility:public",
],
deps = [
+ "//tensorflow_serving/apis:logging_cc_proto",
"//tensorflow_serving/apis:model_cc_proto",
"//tensorflow_serving/config:model_server_config_cc_proto",
"//tensorflow_serving/config:platform_config_cc_proto",
diff --git a/tensorflow_serving/model_servers/test_util/mock_server_core.h b/tensorflow_serving/model_servers/test_util/mock_server_core.h
index ecde432a..64675eee 100644
--- a/tensorflow_serving/model_servers/test_util/mock_server_core.h
+++ b/tensorflow_serving/model_servers/test_util/mock_server_core.h
@@ -19,6 +19,7 @@ limitations under the License.

#include <memory>
#include <string>
+#include <utility>

#include "base/logging.h"
#include "google/protobuf/any.pb.h"
@@ -56,7 +57,9 @@ class MockServerCore : public ServerCore {
return platform_config_map;
}

- static Options GetOptions(const PlatformConfigMap& platform_config_map) {
+ static Options GetOptions(
+ const PlatformConfigMap& platform_config_map,
+ std::unique_ptr<ServerRequestLogger> server_request_logger) {
Options options;
options.platform_config_map = platform_config_map;
options.servable_state_monitor_creator =
@@ -71,13 +74,21 @@ class MockServerCore : public ServerCore {
UniquePtrWithDeps<AspiredVersionsManager>* manager) -> Status {
return Status();
};
- TF_CHECK_OK(
- ServerRequestLogger::Create(nullptr, &options.server_request_logger));
+ if (server_request_logger != nullptr) {
+ options.server_request_logger = std::move(server_request_logger);
+ } else {
+ TF_CHECK_OK(
+ ServerRequestLogger::Create(nullptr, &options.server_request_logger));
+ }
return options;
}

explicit MockServerCore(const PlatformConfigMap& platform_config_map)
- : ServerCore(GetOptions(platform_config_map)) {}
+ : MockServerCore(platform_config_map, nullptr) {}
+ MockServerCore(const PlatformConfigMap& platform_config_map,
+ std::unique_ptr<ServerRequestLogger> server_request_logger)
+ : ServerCore(GetOptions(platform_config_map,
+ std::move(server_request_logger))) {}

MOCK_METHOD(ServableStateMonitor*, servable_state_monitor, (),
(const, override));
diff --git a/tensorflow_serving/model_servers/tf_c_api_exported_symbols.lds b/tensorflow_serving/model_servers/tf_c_api_exported_symbols.lds
new file mode 100644
index 00000000..b5e82a09
