// TensorFlow Serving C++ API documentation: request_logger.h
/* Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 
16 #ifndef TENSORFLOW_SERVING_CORE_REQUEST_LOGGER_H_
17 #define TENSORFLOW_SERVING_CORE_REQUEST_LOGGER_H_
18 
19 #include <random>
20 #include <vector>
21 
22 #include "google/protobuf/message.h"
23 #include "tensorflow/core/lib/core/status.h"
24 #include "tensorflow_serving/apis/logging.pb.h"
25 #include "tensorflow_serving/config/logging_config.pb.h"
26 #include "tensorflow_serving/core/log_collector.h"
27 #include "tensorflow_serving/core/stream_logger.h"
28 
29 namespace tensorflow {
30 namespace serving {
31 
// Abstraction to log requests and responses hitting a server. The log storage
// is handled by the log-collector. Requests are sampled based on the
// sampling rate carried in the config (see UniformSampler below).
// All subclasses must only implement a factory method that returns a
// shared_ptr: MaybeStartLoggingStream calls shared_from_this(), which
// requires that every instance be owned by a shared_ptr.
class RequestLogger : public std::enable_shared_from_this<RequestLogger> {
 public:
  // `logging_config` is copied and owned by this logger; `saved_model_tags`
  // is copied as well. Takes ownership of `log_collector`, the sink that
  // persists sampled log entries.
  RequestLogger(const LoggingConfig& logging_config,
                const std::vector<string>& saved_model_tags,
                std::unique_ptr<LogCollector> log_collector);

  virtual ~RequestLogger() = default;

  // Writes the log for the particular request, response and metadata, if we
  // decide to sample it.
  Status Log(const google::protobuf::Message& request,
             const google::protobuf::Message& response,
             const LogMetadata& log_metadata);

  // Starts logging a stream through returning a StreamLogger through
  // `get_stream_logger_fn` and registers a log callback. Returns NULL if the
  // stream should not be logged.
  // Factory callback that yields the per-stream logger; only invoked when the
  // stream is actually sampled.
  template <typename Request, typename Response>
  using GetStreamLoggerFn = std::function<StreamLogger<Request, Response>*()>;
  template <typename Request, typename Response>
  void MaybeStartLoggingStream(
      const LogMetadata& log_metadata,
      GetStreamLoggerFn<Request, Response> get_stream_logger_fn);

  // Read-only access to the config this logger was constructed with.
  const LoggingConfig& logging_config() const { return logging_config_; }

 private:
  // Creates the log message given the request, response and metadata.
  // Implementations override it to create the particular message that they
  // want to be logged.
  virtual Status CreateLogMessage(const google::protobuf::Message& request,
                                  const google::protobuf::Message& response,
                                  const LogMetadata& log_metadata,
                                  std::unique_ptr<google::protobuf::Message>* log) = 0;

  // Implementations can fill up additional information to LogMetadata.
  virtual LogMetadata FillLogMetadata(const LogMetadata& lm_in) = 0;

  // Writes the log.
  Status Log(const google::protobuf::Message& log);

  // A sampler which samples uniformly at random.
  class UniformSampler {
   public:
    UniformSampler() : rd_(), gen_(rd_()), dist_(0, 1) {}

    // Returns true if the sampler decides to sample it with a probability
    // 'rate'.
    bool Sample(const double rate) { return dist_(gen_) < rate; }

   private:
    std::random_device rd_;  // Non-deterministic seed source.
    std::mt19937 gen_;       // PRNG engine, seeded once at construction.
    std::uniform_real_distribution<double> dist_;  // Uniform over [0, 1).
  };

  const LoggingConfig logging_config_;
  const std::vector<string> saved_model_tags_;
  std::unique_ptr<LogCollector> log_collector_;
  UniformSampler uniform_sampler_;
};
96 
97 /**************************Implementation Detail******************************/
98 template <typename Request, typename Response>
99 void RequestLogger::MaybeStartLoggingStream(
100  const LogMetadata& log_metadata,
101  GetStreamLoggerFn<Request, Response> get_stream_logger_fn) {
102  // Sampling happens at the beginning of logging to avoid logging overhead.
103  // if request logger goes away during a stream which could happen due to
104  // loggin config update, the stream won't be logged.
105  if (!uniform_sampler_.Sample(
106  logging_config_.sampling_config().sampling_rate())) {
107  return;
108  }
109 
110  auto* stream_logger = get_stream_logger_fn();
111  if (stream_logger == nullptr) return;
112 
113  LogMetadata lm_out = FillLogMetadata(log_metadata);
114  std::weak_ptr<RequestLogger> logger_ref(shared_from_this());
115  stream_logger->AddLogCallback(
116  lm_out, [logger_ref = std::move(logger_ref)](const google::protobuf::Message& log) {
117  // The callback refers back to the request logger. If the
118  // request logger goes away after creation but before stream
119  // ends, we simply skip this sink.
120  if (auto logger = logger_ref.lock(); logger != nullptr) {
121  TF_RETURN_IF_ERROR(logger->Log(log));
122  }
123  return OkStatus();
124  });
125 }
126 
127 } // namespace serving
128 } // namespace tensorflow
129 
130 #endif // TENSORFLOW_SERVING_CORE_REQUEST_LOGGER_H_