#include "tensorflow_serving/batching/batching_util.h"

#include <map>
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/env_time.h"

namespace tensorflow {
namespace serving {

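// Builds an Eigen padding specification for `tensor`: for every dimension
// except dimension 0 (the batch dimension), the tensor is padded after its
// existing elements so that the dimension size matches the corresponding
// entry of `max_dim_sizes`. Dimension 0 is never padded.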
template <int num_dims>
Eigen::array<OneDimPadding, num_dims> CreatePadding(
    Tensor tensor, absl::Span<const int> max_dim_sizes) {
  Eigen::array<OneDimPadding, num_dims> padding;
  for (unsigned int i = 0; i < max_dim_sizes.size(); ++i) {
    if (i > 0 && max_dim_sizes[i] - tensor.dim_size(i) > 0) {
      padding[i] = {0, max_dim_sizes[i] - tensor.dim_size(i)};
    } else {
      padding[i] = {0, 0};
    }
  }
  return padding;
}

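// Functor that pads `input` according to `padding` and writes the result to
// `output`. If the padded shape holds the same number of elements as the
// input, the input is simply reshaped into the output; otherwise the tensor
// is padded using its first element as the fill value.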
template <typename T, int num_dims>
struct PadTensor {
  Status operator()(Tensor input,
                    const Eigen::array<OneDimPadding, num_dims>& padding,
                    Tensor* output) {
    TensorShape output_shape;
    for (int d = 0; d < num_dims; ++d) {
      // Elements to add before and after the existing data in dimension d.
      const int32 before_d = padding[d].first;
      const int32 after_d = padding[d].second;
      output_shape.AddDim(before_d + input.dim_size(d) + after_d);
    }
    if (output_shape.num_elements() == input.NumElements()) {
      // No padding needed: reuse the input buffer with the new shape.
      bool result = output->CopyFrom(input, output_shape);
      if (!result) {
        return errors::Internal("Couldn't create output.");
      }
      return absl::OkStatus();
    }
    if (input.NumElements() < 1) {
      return errors::InvalidArgument(
          "Got empty tensor in batch of non-empty tensors.");
    }
    *output = Tensor(input.dtype(), output_shape);
    typename TTypes<T, num_dims>::Tensor inputs = input.tensor<T, num_dims>();
    // Pad with the first element of the input tensor.
    T pad_value(input.flat<T>()(0));
    output->tensor<T, num_dims>() = inputs.pad(padding, pad_value);
    return absl::OkStatus();
  }
};

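// Pads `tensor` of element type T out to `max_dim_sizes`, dispatching on the
// tensor's rank. Only ranks 1 through 6 are supported.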
template <typename T>
Status PadTensorOfSpecificType(const Tensor& tensor,
                               absl::Span<const int> max_dim_sizes,
                               Tensor* output_tensor) {
  int num_dims = tensor.dims();
  switch (num_dims) {
    case 1: {
      Eigen::array<OneDimPadding, 1> padding;
      padding = CreatePadding<1>(tensor, max_dim_sizes);
      PadTensor<T, 1> padding_functor = PadTensor<T, 1>();
      return padding_functor(tensor, padding, output_tensor);
    }
    case 2: {
      Eigen::array<OneDimPadding, 2> padding;
      padding = CreatePadding<2>(tensor, max_dim_sizes);
      PadTensor<T, 2> padding_functor = PadTensor<T, 2>();
      return padding_functor(tensor, padding, output_tensor);
    }
    case 3: {
      Eigen::array<OneDimPadding, 3> padding;
      padding = CreatePadding<3>(tensor, max_dim_sizes);
      PadTensor<T, 3> padding_functor = PadTensor<T, 3>();
      return padding_functor(tensor, padding, output_tensor);
    }
    case 4: {
      Eigen::array<OneDimPadding, 4> padding;
      padding = CreatePadding<4>(tensor, max_dim_sizes);
      PadTensor<T, 4> padding_functor = PadTensor<T, 4>();
      return padding_functor(tensor, padding, output_tensor);
    }
    case 5: {
      Eigen::array<OneDimPadding, 5> padding;
      padding = CreatePadding<5>(tensor, max_dim_sizes);
      PadTensor<T, 5> padding_functor = PadTensor<T, 5>();
      return padding_functor(tensor, padding, output_tensor);
    }
    case 6: {
      Eigen::array<OneDimPadding, 6> padding;
      padding = CreatePadding<6>(tensor, max_dim_sizes);
      PadTensor<T, 6> padding_functor = PadTensor<T, 6>();
      return padding_functor(tensor, padding, output_tensor);
    }
    default:
      return errors::InvalidArgument(
          "Only tensors with rank from 1 to 6 can be padded.");
  }
}

std::map<string, std::vector<int>> CalculateMaxDimSizes(
    const std::vector<std::vector<std::pair<string, Tensor>>>& batch) {
  std::map<string, std::vector<int>> max_dim_sizes;
  // Initialize every entry with zeros, using the first task in the batch to
  // discover tensor names and ranks.
  const std::vector<std::pair<string, Tensor>>& task_inputs = batch[0];
  for (const auto& entry : task_inputs) {
    const string& tensor_name = entry.first;
    const Tensor& tensor = entry.second;
    max_dim_sizes[tensor_name] = std::vector<int>(tensor.dims(), 0);
  }
  // Scan every task and record the largest size seen in each dimension.
  for (int i = 0; i < batch.size(); ++i) {
    const std::vector<std::pair<string, Tensor>>& task_inputs = batch[i];
    for (const auto& entry : task_inputs) {
      const string& tensor_name = entry.first;
      const Tensor& tensor = entry.second;

      std::vector<int>& max_dim_sizes_for_one_input =
          max_dim_sizes[tensor_name];
      for (int j = 0; j < tensor.dims(); ++j) {
        const int old_max_size = max_dim_sizes_for_one_input[j];
        if (tensor.shape().dim_size(j) > old_max_size) {
          max_dim_sizes_for_one_input[j] = tensor.shape().dim_size(j);
        }
      }
    }
  }
  return max_dim_sizes;
}

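// Pads `tensor` out to `max_dim_sizes`, dispatching on its dtype to the
// type-specific padding routine above.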
Status AddPadding(const Tensor& tensor, absl::Span<const int> max_dim_sizes,
                  Tensor* padded_tensor) {
  const DataType input_dtype = tensor.dtype();
  Status padding_status;
#define CASE(type)                                                            \
  case DataTypeToEnum<type>::value: {                                         \
    padding_status =                                                          \
        PadTensorOfSpecificType<type>(tensor, max_dim_sizes, padded_tensor);  \
    break;                                                                    \
  }
  switch (input_dtype) {
    TF_CALL_ALL_TYPES(CASE);
    TF_CALL_QUANTIZED_TYPES(CASE);
    // quint16 and qint16 are not covered by the quantized-types macro above.
    TF_CALL_quint16(CASE);
    TF_CALL_qint16(CASE);
    default:
      padding_status = errors::InvalidArgument("Unsupported type");
  }
#undef CASE
  return padding_status;
}

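// Returns the first element of `allowed_batch_sizes` (assumed sorted in
// increasing order) that is at least `batch_size`. If `allowed_batch_sizes`
// is empty, or `batch_size` exceeds its largest element, `batch_size` is
// returned unchanged.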
int RoundToLowestAllowedBatchSize(absl::Span<const int> allowed_batch_sizes,
                                  int batch_size) {
  if (allowed_batch_sizes.empty()) {
    return batch_size;
  }
  for (int allowed_size : allowed_batch_sizes) {
    if (allowed_size >= batch_size) {
      return allowed_size;
    }
  }
  LOG(WARNING) << "Input batch size " << batch_size
               << " is greater than largest allowed size "
               << *allowed_batch_sizes.rbegin()
               << " ignoring allowed sizes constraint.";
  return batch_size;
}

bool AreShapesEqualExceptZeroDim(const TensorShape& shape1,
                                 const TensorShape& shape2) {
  if (shape1.dims() != shape2.dims()) {
    return false;
  }
  for (int i = 1; i < shape1.dims(); ++i) {
    if (shape1.dim_size(i) != shape2.dim_size(i)) {
      return false;
    }
  }
  return true;
}

}  // namespace serving
}  // namespace tensorflow