// TensorFlow Serving C++ API Documentation
// server_core_test_util.cc
1 /* Copyright 2016 Google Inc. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7  http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include "tensorflow_serving/model_servers/test_util/server_core_test_util.h"
17 
18 #include <memory>
19 #include <utility>
20 
21 #include "tensorflow/core/lib/core/status_test_util.h"
22 #include "tensorflow/core/lib/io/path.h"
23 #include "tensorflow_serving/core/availability_preserving_policy.h"
24 #include "tensorflow_serving/core/test_util/fake_loader_source_adapter.h"
25 #include "tensorflow_serving/model_servers/model_platform_types.h"
26 #include "tensorflow_serving/model_servers/platform_config_util.h"
27 #include "tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.pb.h"
28 #include "tensorflow_serving/servables/tensorflow/session_bundle_config.pb.h"
29 #include "tensorflow_serving/test_util/test_util.h"
30 
31 namespace tensorflow {
32 namespace serving {
33 namespace test_util {
34 
35 namespace {
36 
37 void AddSessionRunLoadThreadPool(SessionBundleConfig* const bundle_config) {
38  auto* const session_config = bundle_config->mutable_session_config();
39  session_config->add_session_inter_op_thread_pool();
40  // The second pool will be used for loading.
41  session_config->add_session_inter_op_thread_pool()->set_num_threads(4);
42  bundle_config->mutable_session_run_load_threadpool_index()->set_value(1);
43 }
44 
45 } // namespace
46 
47 Status CreateServerCore(const ModelServerConfig& config,
48  ServerCore::Options options,
49  std::unique_ptr<ServerCore>* server_core) {
50  options.model_server_config = config;
51  return ServerCore::Create(std::move(options), server_core);
52 }
53 
54 Status CreateServerCore(const ModelServerConfig& config,
55  std::unique_ptr<ServerCore>* server_core) {
56  return CreateServerCore(config, ServerCoreTest::GetDefaultOptions(),
57  server_core);
58 }
59 
60 ServerCore::Options ServerCoreTest::GetDefaultOptions() {
61  ServerCore::Options options;
62  options.file_system_poll_wait_seconds = 1;
63  // Reduce the number of initial load threads to be num_load_threads to avoid
64  // timing out in tests.
65  options.num_initial_load_threads = options.num_load_threads;
66  options.aspired_version_policy =
67  std::unique_ptr<AspiredVersionPolicy>(new AvailabilityPreservingPolicy);
68  options.custom_model_config_loader =
69  [](const ::google::protobuf::Any& any, EventBus<ServableState>* event_bus,
70  UniquePtrWithDeps<AspiredVersionsManager>* manager) -> Status {
71  return Status();
72  };
73 
74  SessionBundleConfig bundle_config;
75  AddSessionRunLoadThreadPool(&bundle_config);
76 
77  options.platform_config_map =
78  CreateTensorFlowPlatformConfigMap(bundle_config);
79  ::google::protobuf::Any fake_source_adapter_config;
80  fake_source_adapter_config.PackFrom(
81  test_util::FakeLoaderSourceAdapterConfig());
82  (*(*options.platform_config_map.mutable_platform_configs())[kFakePlatform]
83  .mutable_source_adapter_config()) = fake_source_adapter_config;
84 
85  return options;
86 }
87 
88 ModelServerConfig ServerCoreTest::GetTestModelServerConfigForFakePlatform() {
89  ModelServerConfig config = GetTestModelServerConfigForTensorflowPlatform();
90  ModelConfig* model_config =
91  config.mutable_model_config_list()->mutable_config(0);
92  model_config->set_model_platform(kFakePlatform);
93  return config;
94 }
95 
96 ModelServerConfig
97 ServerCoreTest::GetTestModelServerConfigForTensorflowPlatform() {
98  ModelServerConfig config;
99  auto model = config.mutable_model_config_list()->add_config();
100  model->set_name(kTestModelName);
101  if (GetTestType() == SAVED_MODEL) {
102  model->set_base_path(test_util::TensorflowTestSrcDirPath(
103  "/cc/saved_model/testdata/half_plus_two"));
104  } else {
105  model->set_base_path(test_util::TestSrcDirPath(
106  "/servables/tensorflow/google/testdata/half_plus_two"));
107  }
108  if (PrefixPathsWithURIScheme()) {
109  model->set_base_path(io::CreateURI("file", "", model->base_path()));
110  }
111  model->set_model_platform(kTensorFlowModelPlatform);
112  return config;
113 }
114 
115 void ServerCoreTest::SwitchToHalfPlusTwoWith2Versions(
116  ModelServerConfig* config) {
117  CHECK_EQ(1, config->model_config_list().config().size());
118  auto model = config->mutable_model_config_list()->mutable_config(0);
119  if (GetTestType() == SAVED_MODEL) {
120  model->set_base_path(test_util::TestSrcDirPath(
121  "/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions"));
122  } else {
123  model->set_base_path(test_util::TestSrcDirPath(
124  "/servables/tensorflow/google/testdata/half_plus_two_2_versions"));
125  }
126  // Request loading both versions simultaneously.
127  model->clear_model_version_policy();
128  model->mutable_model_version_policy()->mutable_all();
129  if (PrefixPathsWithURIScheme()) {
130  model->set_base_path(io::CreateURI("file", "", model->base_path()));
131  }
132 }
133 
134 Status ServerCoreTest::CreateServerCore(
135  const ModelServerConfig& config, ServerCore::Options options,
136  std::unique_ptr<ServerCore>* server_core) {
137  return test_util::CreateServerCore(config, std::move(options), server_core);
138 }
139 
140 } // namespace test_util
141 } // namespace serving
142 } // namespace tensorflow
// Cross-reference (from generated docs): static Status ServerCore::Create(
//     Options options, std::unique_ptr<ServerCore>* core)
// Definition: server_core.cc:231