#include "tensorflow_serving/servables/tensorflow/multi_inference_helper.h"

#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow_serving/apis/input.pb.h"
#include "tensorflow_serving/apis/model.pb.h"
#include "tensorflow_serving/servables/tensorflow/multi_inference.h"
namespace tensorflow {
namespace serving {

namespace {
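// Returns the ModelSpec from the first task in the request, or the default
// (empty) ModelSpec instance if the request has no tasks or no model spec.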
const ModelSpec& GetModelSpecFromRequest(const MultiInferenceRequest& request) {
  if (request.tasks_size() > 0 && request.tasks(0).has_model_spec()) {
    return request.tasks(0).model_spec();
  }
  return ModelSpec::default_instance();
}

}  // namespace
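// Runs the multi-inference request against the model named by its first
// task, delegating to the WithModelSpec variant below.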
Status RunMultiInferenceWithServerCore(
    const RunOptions& run_options, ServerCore* core,
    const tensorflow::thread::ThreadPoolOptions& thread_pool_options,
    const MultiInferenceRequest& request, MultiInferenceResponse* response) {
  return RunMultiInferenceWithServerCoreWithModelSpec(
      run_options, core, thread_pool_options, GetModelSpecFromRequest(request),
      request, response);
}
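// Looks up the servable for `model_spec` through `core` and runs the request
// against that servable's session.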
Status RunMultiInferenceWithServerCoreWithModelSpec(
    const RunOptions& run_options, ServerCore* core,
    const tensorflow::thread::ThreadPoolOptions& thread_pool_options,
    const ModelSpec& model_spec, const MultiInferenceRequest& request,
    MultiInferenceResponse* response) {
  // Hold the servable handle for the duration of the call so the model
  // cannot be unloaded while inference is running.
  ServableHandle<SavedModelBundle> bundle;
  TF_RETURN_IF_ERROR(core->GetServableHandle(model_spec, &bundle));

  return RunMultiInference(run_options, bundle->meta_graph_def,
                           bundle.id().version, bundle->session.get(), request,
                           response, thread_pool_options);
}

}  // namespace serving
}  // namespace tensorflow
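// Usage sketch (illustrative only, not part of this translation unit):
// a caller holding an initialized ServerCore might assemble a request as
// below. The model name "my_model" and the `core` variable are assumptions.
//
//   tensorflow::serving::MultiInferenceRequest request;
//   auto* task = request.add_tasks();
//   task->mutable_model_spec()->set_name("my_model");
//   task->set_method_name("tensorflow/serving/classify");
//   // ... populate request.mutable_input() with tf.Examples ...
//   tensorflow::serving::MultiInferenceResponse response;
//   TF_RETURN_IF_ERROR(tensorflow::serving::RunMultiInferenceWithServerCore(
//       tensorflow::RunOptions(), core.get(),
//       tensorflow::thread::ThreadPoolOptions(), request, &response));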